Dataset schema (five fields per record, in this order):

  code                     string   length 86 - 54.5k
  code_codestyle           int64    0 - 371
  style_context            string   length 87 - 49.2k
  style_context_codestyle  int64    0 - 349
  label                    int64    0 - 1
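The records below are rendered field by field. As a quick orientation, here is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face datasets library. The dataset identifier is a placeholder (the real hub id is not given in this dump), and reading label as a style-match flag is an assumption, not something the dump states.

from datasets import load_dataset

# Hypothetical hub id -- replace with the dataset's real identifier.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:120])               # start of the first code sample
print(row["code_codestyle"])           # integer style id for `code`
print(row["style_context"][:120])      # start of the paired sample
print(row["style_context_codestyle"])  # integer style id for the context
print(row["label"])                    # assumed: 1 if the two styles match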
code:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)

# Lazy import structure for BEiT, following the standard transformers
# __init__ pattern: names are registered here and only imported on first use.
_import_structure = {
    "configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 25

style_context:
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century, 1 Jan 1901 to 31 Dec 2000 (Project Euler 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 25
label: 1

code:
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin lowerCAmelCase : str = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=16 , snake_case__=13 , snake_case__=7 , snake_case__=14 , snake_case__=10 , snake_case__=19 , snake_case__=5 , snake_case__=4 , snake_case__=True , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=[1, 2, 3, 4, 5] , snake_case__=25 , snake_case__=5 , ): '''simple docstring''' _lowerCAmelCase : str = d_model _lowerCAmelCase : Optional[int] = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : Optional[int] = prediction_length _lowerCAmelCase : str = context_length _lowerCAmelCase : Dict = cardinality _lowerCAmelCase : List[str] = num_time_features _lowerCAmelCase : List[str] = lags_sequence _lowerCAmelCase : int = embedding_dimension _lowerCAmelCase : Any = is_training _lowerCAmelCase : Optional[Any] = hidden_size _lowerCAmelCase : Optional[int] = num_hidden_layers _lowerCAmelCase : Any = num_attention_heads _lowerCAmelCase : List[str] = intermediate_size _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Tuple = attention_probs_dropout_prob _lowerCAmelCase : Union[str, Any] = context_length _lowerCAmelCase : Optional[Any] = prediction_length + label_length _lowerCAmelCase : List[Any] = label_length _lowerCAmelCase : str = moving_average _lowerCAmelCase : str = autocorrelation_factor def a ( self ): '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = config.context_length + max(config.lags_sequence ) _lowerCAmelCase : Dict = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _lowerCAmelCase : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, _past_length] ) _lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _lowerCAmelCase : List[str] = floats_tensor([self.batch_size, config.prediction_length, 
config.num_time_features] ) _lowerCAmelCase : Any = floats_tensor([self.batch_size, config.prediction_length] ) _lowerCAmelCase : Any = { 'past_values': past_values, 'static_categorical_features': static_categorical_features, 'past_time_features': past_time_features, 'past_observed_mask': past_observed_mask, 'future_time_features': future_time_features, 'future_values': future_values, } return inputs_dict def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.get_config() _lowerCAmelCase : Optional[int] = self.prepare_autoformer_inputs_dict(snake_case__ ) return config, inputs_dict def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval() _lowerCAmelCase : int = model(**snake_case__ ) _lowerCAmelCase : List[str] = outputs.encoder_last_hidden_state _lowerCAmelCase : Union[str, Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase : int = model.get_encoder() encoder.save_pretrained(snake_case__ ) _lowerCAmelCase : Optional[Any] = AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = model.create_network_inputs(**snake_case__ ) _lowerCAmelCase , _lowerCAmelCase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _lowerCAmelCase : int = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _lowerCAmelCase : Tuple = encoder(inputs_embeds=snake_case__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) _lowerCAmelCase : List[Any] = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _lowerCAmelCase : Tuple = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _lowerCAmelCase : Any = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _lowerCAmelCase : Union[str, Any] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase : Optional[Any] = model.get_decoder() decoder.save_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = decoder( trend=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () __magic_name__ = (AutoformerForPrediction,) if is_torch_available() else () __magic_name__ = {"feature-extraction": AutoformerModel} if is_torch_available() else {} __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = AutoformerModelTester(self ) _lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _lowerCAmelCase : Any = model_class(snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case__ ) _lowerCAmelCase , _lowerCAmelCase : List[Any] = model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ ) self.assertEqual(info['missing_keys'] , [] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ ) @unittest.skip(reason='Model has no tokens embeddings' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = inspect.signature(getattr(snake_case__ , 'forward' ) ) # The main input is the name of the argument after `self` _lowerCAmelCase : Optional[Any] = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Tuple = model_class(snake_case__ ) _lowerCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : List[Any] = [*signature.parameters.keys()] _lowerCAmelCase : Any = [ 'past_values', 'past_time_features', 'past_observed_mask', 'static_categorical_features', 
'static_real_features', 'future_values', 'future_time_features', ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append('future_observed_mask' ) expected_arg_names.extend( [ 'decoder_attention_mask', 'head_mask', 'decoder_head_mask', 'cross_attn_head_mask', 'encoder_outputs', 'past_key_values', 'output_hidden_states', 'output_attentions', 'use_cache', 'return_dict', ] ) self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : str = getattr(self.model_tester , 'seq_length' , snake_case__ ) _lowerCAmelCase : str = getattr(self.model_tester , 'decoder_seq_length' , snake_case__ ) _lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , snake_case__ ) _lowerCAmelCase : Tuple = getattr(self.model_tester , 'd_model' , snake_case__ ) _lowerCAmelCase : List[str] = getattr(self.model_tester , 'num_attention_heads' , snake_case__ ) _lowerCAmelCase : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: _lowerCAmelCase : str = True _lowerCAmelCase : Optional[Any] = False _lowerCAmelCase : int = True _lowerCAmelCase : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : str = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCAmelCase : List[str] = True _lowerCAmelCase : Any = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : Tuple = outputs.encoder_attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _lowerCAmelCase : Tuple = len(snake_case__ ) _lowerCAmelCase : List[str] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case__ , snake_case__ ) # decoder attentions _lowerCAmelCase : int = outputs.decoder_attentions self.assertIsInstance(snake_case__ , (list, tuple) ) self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _lowerCAmelCase : Any = outputs.cross_attentions self.assertIsInstance(snake_case__ , (list, tuple) ) self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _lowerCAmelCase : List[str] = True _lowerCAmelCase : Any = True _lowerCAmelCase : Union[str, Any] = model_class(snake_case__ ) model.to(snake_case__ ) 
model.eval() with torch.no_grad(): _lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) self.assertEqual(out_len + 2 , len(snake_case__ ) ) _lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def a ( self ): '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def lowercase (_A="train-batch.pt" ): """simple docstring""" _lowerCAmelCase : Any = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=_A , repo_type='dataset' ) _lowerCAmelCase : Any = torch.load(_A , map_location=_A ) return batch @require_torch @slow class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case__ ) _lowerCAmelCase : str = prepare_batch() with torch.no_grad(): _lowerCAmelCase : List[str] = model( past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0] _lowerCAmelCase : Tuple = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case__ ) _lowerCAmelCase : Optional[int] = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = prepare_batch('val-batch.pt' ) with torch.no_grad(): _lowerCAmelCase : Any = model( past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state _lowerCAmelCase : str = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case__ ) _lowerCAmelCase : Union[str, Any] = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case__ ) _lowerCAmelCase : str = prepare_batch('val-batch.pt' ) with torch.no_grad(): _lowerCAmelCase : Optional[Any] = model.generate( static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , ) _lowerCAmelCase : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , 
snake_case__ ) _lowerCAmelCase : Tuple = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case__ ) _lowerCAmelCase : List[Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case__ , rtol=1E-1 ) )
code_codestyle: 25

style_context:
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve
    and the product formula phi(n) = n * prod(1 - 1/p) over primes p | n."""
    # Sieve the primes up to `limit`.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # phi[n] starts as n and is scaled by (1 - 1/p) for every prime divisor p.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 25
label: 1

code:
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND gate: output is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
code_codestyle: 25

style_context:
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
style_context_codestyle: 25
label: 1

code:
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # The next row of the current board (possible_board) to place a queen in.
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of
    # the current board (possible_board), so it is a solution.
    if row == n:
        # Convert possible_board, which looks like [1, 3, 0, 2], into
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Try every column in this row.
    for col in range(n):
        # First check that the current board (possible_board) does not already
        # contain this column value; if it does, there is a vertical collision.
        # Then apply the two diagonal formulas:
        #
        #   45 degrees:  row - col = b
        #   135 degrees: row + col = b
        #
        # and verify that neither result already appears in
        # diagonal_right_collisions or diagonal_left_collisions.
        #
        # If any of these checks is True there is a collision, so continue to
        # the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise recurse with the updated state.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards.
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
code_codestyle: 25

style_context:
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
style_context_codestyle: 25
label: 1

code:
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 25

style_context:
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncated towards zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 25
label: 1

code:
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : List[str] = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 25

style_context:
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
style_context_codestyle: 25
label: 1

code:
def matching_min_vertex_cover(graph: dict) -> set:
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
code_codestyle: 25

style_context:
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
style_context_codestyle: 25
label: 1

code:
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include sequence[index].
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
code_codestyle: 25

style_context:
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
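# Hedged usage sketch (added for illustration; the checkpoint name is an
# assumption, any NLLB-200 checkpoint behaves the same way):
from transformers import NllbTokenizerFast

nllb_tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
# Assigning to `src_lang` goes through the property setter above, which re-runs
# set_src_lang_special_tokens so the language-code token is swapped in place.
nllb_tok.src_lang = "spa_Latn"
enc = nllb_tok("Hola mundo", return_tensors="pt")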
25
1
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = params _lowerCAmelCase : Dict = np.array(snake_case__ ) _lowerCAmelCase : Optional[Any] = np.array([len(snake_case__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , snake_case__ ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def a ( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.params.max_model_input_size _lowerCAmelCase : int = self.lengths > max_len logger.info(F'Splitting {sum(snake_case__ )} too long sequences.' ) def divide_chunks(snake_case__ , snake_case__ ): return [l[i : i + n] for i in range(0 , len(snake_case__ ) , snake_case__ )] _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : str = [] if self.params.mlm: _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: _lowerCAmelCase , _lowerCAmelCase : str = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _lowerCAmelCase : str = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _lowerCAmelCase : Optional[Any] = np.insert(snake_case__ , 0 , snake_case__ ) if sub_s[-1] != sep_id: _lowerCAmelCase : str = np.insert(snake_case__ , len(snake_case__ ) , snake_case__ ) assert len(snake_case__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(snake_case__ ) new_tok_ids.extend(snake_case__ ) new_lengths.extend([len(snake_case__ ) for l in sub_seqs] ) _lowerCAmelCase : int = np.array(snake_case__ ) _lowerCAmelCase : Any = np.array(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = len(self ) _lowerCAmelCase : List[str] = self.lengths > 11 _lowerCAmelCase : Optional[Any] = self.token_ids[indices] _lowerCAmelCase : List[Any] = self.lengths[indices] _lowerCAmelCase : Optional[int] = len(self ) logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' ) def a ( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: _lowerCAmelCase : Any = self.params.special_tok_ids['unk_token'] _lowerCAmelCase : Tuple = len(self ) _lowerCAmelCase : Any = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _lowerCAmelCase : Tuple = (unk_occs / self.lengths) < 0.5 _lowerCAmelCase : Union[str, Any] = self.token_ids[indices] _lowerCAmelCase : Tuple = self.lengths[indices] _lowerCAmelCase : Union[str, Any] = len(self ) logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' 
) def a ( self ): '''simple docstring''' if not self.params.is_master: return logger.info(F'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [t[0] for t in batch] _lowerCAmelCase : str = [t[1] for t in batch] assert len(snake_case__ ) == len(snake_case__ ) # Max for paddings _lowerCAmelCase : Dict = max(snake_case__ ) # Pad token ids if self.params.mlm: _lowerCAmelCase : Any = self.params.special_tok_ids['pad_token'] else: _lowerCAmelCase : Any = self.params.special_tok_ids['unk_token'] _lowerCAmelCase : Tuple = [list(t.astype(snake_case__ ) ) + [pad_idx] * (max_seq_len_ - len(snake_case__ )) for t in token_ids] assert len(tk_ ) == len(snake_case__ ) assert all(len(snake_case__ ) == max_seq_len_ for t in tk_ ) _lowerCAmelCase : Optional[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_) _lowerCAmelCase : List[str] = torch.tensor(snake_case__ ) # (bs) return tk_t, lg_t
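# Toy illustration (added; not part of the original file) of the same
# pad-and-collate step as `batch_sequences` above, stripped of the Params plumbing:
import numpy as np
import torch

def pad_batch(token_ids, pad_idx):
    # Pad every sequence to the longest one in the batch, keep true lengths.
    max_seq_len = max(len(t) for t in token_ids)
    padded = [list(t) + [pad_idx] * (max_seq_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor([len(t) for t in token_ids])

tk_t, lg_t = pad_batch([np.array([5, 6, 7]), np.array([8, 9])], pad_idx=0)
assert tk_t.shape == (2, 3) and lg_t.tolist() == [3, 2]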
25
'''simple docstring'''

import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Return True only if every custom extension file ships with the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
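# Assumed invocation (illustration only; the file path is a guess based on the
# transformers repo layout):
#   python utils/check_build.py             # inspect build/lib/transformers
#   python utils/check_build.py --check_lib # inspect the installed package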
25
1
'''simple docstring'''

import requests


def send_slack_message(message_body, slack_url):
    """Post a plain-text message to a Slack incoming-webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
25
'''simple docstring'''


def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: longest palindromic substring in O(n)."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes then update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
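# Worked example (added for illustration) of the separator trick: even-length
# palindromes get an odd-length centre once '|' is inserted between characters.
assert palindromic_string("aba") == "aba"
assert palindromic_string("abbbaba") == "abbba"  # widest radius sits on the middle 'b'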
25
1
'''simple docstring'''


def circle_sort(collection: list) -> list:
    """Sort `collection` in place by repeated mirrored-pair comparison passes."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
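# Quick checks (added for illustration): a reversed list is repaired by the
# mirrored-pair swaps, and inputs shorter than two elements return unchanged.
assert circle_sort([4, 3, 2, 1]) == [1, 2, 3, 4]
assert circle_sort([]) == []
assert circle_sort([7]) == [7]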
25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
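# Minimal standalone sketch (added; mirrors the first unit test above): a
# KwargsHandler subclass only serializes fields that differ from their defaults.
from dataclasses import dataclass

from accelerate.utils import KwargsHandler


@dataclass
class TinyKwargs(KwargsHandler):
    a: int = 0
    b: bool = False


assert TinyKwargs().to_kwargs() == {}
assert TinyKwargs(a=2).to_kwargs() == {"a": 2}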
25
1
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase : Optional[int] = logging.getLogger(__name__) @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __magic_name__ = field( default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __magic_name__ = field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) __magic_name__ = field( default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __magic_name__ = field( default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) __magic_name__ = field( default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , ) __magic_name__ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__ = field( default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowercase (): """simple docstring""" _lowerCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) _lowerCAmelCase : Tuple = import_module('tasks' ) try: _lowerCAmelCase : Dict = getattr(_A , model_args.task_type ) _lowerCAmelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ' f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _lowerCAmelCase : str = token_classification_task.get_labels(data_args.labels ) _lowerCAmelCase : Dict[int, str] = dict(enumerate(_A ) ) _lowerCAmelCase : Optional[int] = len(_A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_A , idalabel=_A , labelaid={label: i for i, label in enumerate(_A )} , cache_dir=model_args.cache_dir , ) _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) _lowerCAmelCase : List[Any] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , ) # Get datasets _lowerCAmelCase : Optional[int] = ( TokenClassificationDataset( token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _lowerCAmelCase : Optional[int] = ( TokenClassificationDataset( token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(_A , _A ) -> Tuple[List[int], List[int]]: _lowerCAmelCase : Any = np.argmax(_A , axis=2 ) _lowerCAmelCase , _lowerCAmelCase : Tuple = preds.shape _lowerCAmelCase : Any = [[] for _ in range(_A )] _lowerCAmelCase : List[Any] = [[] for _ in range(_A )] for i in range(_A ): for j in range(_A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_A ) -> Dict: 
_lowerCAmelCase , _lowerCAmelCase : List[Any] = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(_A , _A ), "precision": precision_score(_A , _A ), "recall": recall_score(_A , _A ), "f1": fa_score(_A , _A ), } # Data collator _lowerCAmelCase : Dict = DataCollatorWithPadding(_A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _lowerCAmelCase : Dict = Trainer( model=_A , args=_A , train_dataset=_A , eval_dataset=_A , compute_metrics=_A , data_collator=_A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _lowerCAmelCase : Dict = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _lowerCAmelCase : Optional[Any] = trainer.evaluate() _lowerCAmelCase : Optional[int] = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(_A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _A , _A ) writer.write('%s = %s\n' % (key, value) ) results.update(_A ) # Predict if training_args.do_predict: _lowerCAmelCase : str = TokenClassificationDataset( token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = trainer.predict(_A ) _lowerCAmelCase , _lowerCAmelCase : Tuple = align_predictions(_A , _A ) _lowerCAmelCase : Tuple = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(_A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , _A , _A ) writer.write('%s = %s\n' % (key, value) ) # Save predictions _lowerCAmelCase : Optional[Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(_A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(_A , _A , _A ) return results def lowercase (_A ): """simple docstring""" main() if __name__ == "__main__": main()
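# Toy illustration (added; not part of the script) of what `align_predictions`
# does: argmax the logits, then drop positions labelled with CrossEntropyLoss's
# ignore_index (-100) before scoring with seqeval.
import numpy as np

logits = np.array([[[0.1, 0.9], [0.8, 0.2]]])  # (batch=1, seq_len=2, num_labels=2)
label_ids = np.array([[1, -100]])              # second position is padding
preds = np.argmax(logits, axis=2)
kept = [(int(p), int(l)) for p, l in zip(preds[0], label_ids[0]) if l != -100]
assert kept == [(1, 1)]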
25
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
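# Hedged usage sketch (added; in recent transformers releases this config lives
# under the deprecated models, so the import path is an assumption):
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_layer=6)  # other fields keep their defaults
assert config.n_layer == 6 and config.vocab_size == 100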
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
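# Standalone sketch (added for illustration) of the same deferred-import idea
# using PEP 562 module-level __getattr__; this is *not* the _LazyModule
# implementation itself, just the core mechanism it builds on.
import importlib

_LAZY_ATTRS = {"np": "numpy"}  # attribute name -> module to load on first use

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return importlib.import_module(_LAZY_ATTRS[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")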
25
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
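# Usage sketch mirroring the last assertion above (kept as comments, since it
# needs the real checkpoint downloaded): the *source* language code is prepended
# to the input ids, while the target language id is returned as
# `forced_bos_token_id` for generation.
#   tok = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
#   inputs = tok._build_translation_inputs(
#       "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
#   )
#   inputs["forced_bos_token_id"]  # id of the ar_AR language-code token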
25
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
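# Assumed way to run this module (illustrative): the fast tests above execute on
# CPU with the dummy components, while the @slow block needs a CUDA device and
# the real Hub checkpoints, e.g.
#   RUN_SLOW=1 pytest -k "inpaint" tests/pipelines/kandinskyv22/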
25
'''simple docstring'''

from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
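# Why the candidate advances by 6 * cube_index (added check): the primes counted
# here are differences of consecutive cubes, (k + 1)**3 - k**3 == 3*k*k + 3*k + 1,
# and consecutive values of that expression grow by 6 * (k + 1), which is exactly
# what `prime_candidate += 6 * cube_index` implements.
diffs = [(k + 1) ** 3 - k**3 for k in range(1, 5)]
assert diffs == [7, 19, 37, 61]
assert [b - a for a, b in zip(diffs, diffs[1:])] == [12, 18, 24]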
25
1
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = 3_8_4 _lowerCAmelCase : Union[str, Any] = 7 if "tiny" in model_name: _lowerCAmelCase : Optional[int] = 9_6 _lowerCAmelCase : Optional[int] = (2, 2, 6, 2) _lowerCAmelCase : Union[str, Any] = (3, 6, 1_2, 2_4) elif "small" in model_name: _lowerCAmelCase : Optional[int] = 9_6 _lowerCAmelCase : int = (2, 2, 1_8, 2) _lowerCAmelCase : List[str] = (3, 6, 1_2, 2_4) elif "base" in model_name: _lowerCAmelCase : List[str] = 1_2_8 _lowerCAmelCase : Dict = (2, 2, 1_8, 2) _lowerCAmelCase : Dict = (4, 8, 1_6, 3_2) _lowerCAmelCase : List[str] = 1_2 _lowerCAmelCase : Dict = 5_1_2 elif "large" in model_name: _lowerCAmelCase : Union[str, Any] = 1_9_2 _lowerCAmelCase : Any = (2, 2, 1_8, 2) _lowerCAmelCase : Dict = (6, 1_2, 2_4, 4_8) _lowerCAmelCase : str = 1_2 _lowerCAmelCase : str = 7_6_8 # set label information _lowerCAmelCase : List[str] = 1_5_0 _lowerCAmelCase : Union[str, Any] = 'huggingface/label-files' _lowerCAmelCase : List[Any] = 'ade20k-id2label.json' _lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) _lowerCAmelCase : Tuple = {int(_A ): v for k, v in idalabel.items()} _lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()} _lowerCAmelCase : str = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) _lowerCAmelCase : List[Any] = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Optional[int] = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) 
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase : str = dct.pop(_A ) _lowerCAmelCase : Tuple = val def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCAmelCase : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCAmelCase : Dict = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) _lowerCAmelCase : Dict = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase : Tuple = in_proj_weight[:dim, :] _lowerCAmelCase : List[Any] = in_proj_bias[: dim] _lowerCAmelCase : Dict = in_proj_weight[ dim : dim * 2, : ] _lowerCAmelCase : Tuple = in_proj_bias[ dim : dim * 2 ] _lowerCAmelCase : Dict = in_proj_weight[ -dim :, : ] _lowerCAmelCase : Any = in_proj_bias[-dim :] # fmt: on def lowercase (_A ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase : str = x.shape _lowerCAmelCase : List[str] = x.reshape(_A , 4 , in_channel // 4 ) _lowerCAmelCase : Dict = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def lowercase (_A ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase : Tuple = x.shape _lowerCAmelCase : Dict = x.reshape(_A , in_channel // 4 , 4 ) _lowerCAmelCase : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = x.shape[0] _lowerCAmelCase : Tuple = x.reshape(4 , in_channel // 4 ) _lowerCAmelCase : Any = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def lowercase (_A ): """simple docstring""" 
_lowerCAmelCase : List[str] = x.shape[0] _lowerCAmelCase : Tuple = x.reshape(in_channel // 4 , 4 ) _lowerCAmelCase : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase : str = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } _lowerCAmelCase : int = model_name_to_url[model_name] _lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='cpu' , file_name=_A )[ 'state_dict' ] for name, param in state_dict.items(): print(_A , param.shape ) _lowerCAmelCase : Optional[Any] = get_upernet_config(_A ) _lowerCAmelCase : List[str] = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCAmelCase : List[Any] = state_dict.pop(_A ) if "bn" in key: _lowerCAmelCase : Optional[int] = key.replace('bn' , 'batch_norm' ) _lowerCAmelCase : Dict = val # rename keys _lowerCAmelCase : Optional[int] = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCAmelCase : List[str] = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: _lowerCAmelCase : int = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image _lowerCAmelCase : List[Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' _lowerCAmelCase : Optional[Any] = Image.open(requests.get(_A , stream=_A ).raw ).convert('RGB' ) _lowerCAmelCase : str = SegformerImageProcessor() _lowerCAmelCase : List[Any] = processor(_A , return_tensors='pt' ).pixel_values with torch.no_grad(): _lowerCAmelCase : List[Any] = model(_A ) _lowerCAmelCase : Tuple = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCAmelCase : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": _lowerCAmelCase : Dict = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": _lowerCAmelCase : Union[str, Any] = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": 
_lowerCAmelCase : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_A ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_A ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[F'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCAmelCase : List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
25
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
25
1
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowercase (_A ): """simple docstring""" _lowerCAmelCase : str = filter(lambda _A : p.requires_grad , model.parameters() ) _lowerCAmelCase : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase : str = logging.getLogger(__name__) def lowercase (_A , _A ): """simple docstring""" if metric == "rouge2": _lowerCAmelCase : Any = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": _lowerCAmelCase : List[Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": _lowerCAmelCase : List[str] = '{val_avg_em:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) _lowerCAmelCase : Tuple = ModelCheckpoint( dirpath=_A , filename=_A , monitor=f'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def lowercase (_A , _A ): """simple docstring""" return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=_A , verbose=_A , ) class UpperCamelCase__ ( pl.Callback ): """simple docstring""" def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(snake_case__ ) @rank_zero_only def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=True ): '''simple docstring''' logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) _lowerCAmelCase : List[str] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results _lowerCAmelCase : Any = Path(pl_module.hparams.output_dir ) if type_path == "test": _lowerCAmelCase : int = od / 'test_results.txt' _lowerCAmelCase : str = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_lowerCAmelCase : Union[str, Any] = od / F'{type_path}_results/{trainer.global_step:05d}.txt' _lowerCAmelCase : List[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=snake_case__ ) generations_file.parent.mkdir(exist_ok=snake_case__ ) with open(snake_case__ , 'a+' ) as writer: for key in sorted(snake_case__ ): if key in ["log", "progress_bar", "preds"]: continue _lowerCAmelCase : Any = metrics[key] if isinstance(snake_case__ , torch.Tensor ): _lowerCAmelCase : List[Any] = val.item() _lowerCAmelCase : List[Any] = F'{key}: {val:.6f}\n' writer.write(snake_case__ ) if not save_generations: return if "preds" in metrics: _lowerCAmelCase : Union[str, Any] = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(snake_case__ ) @rank_zero_only def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' try: _lowerCAmelCase : Optional[int] = pl_module.model.model.num_parameters() except AttributeError: _lowerCAmelCase : Optional[int] = pl_module.model.num_parameters() _lowerCAmelCase : int = count_trainable_parameters(snake_case__ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(snake_case__ , snake_case__ , 'test' ) @rank_zero_only def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
25
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case__ , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(snake_case__ , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(snake_case__ , 'num_attention_heads' ) ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=32 , snake_case__=2 , snake_case__=3 , snake_case__=640 , snake_case__=4 , snake_case__="silu" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = parent _lowerCAmelCase : Dict = batch_size _lowerCAmelCase : Tuple = image_size _lowerCAmelCase : Optional[int] = patch_size _lowerCAmelCase : str = num_channels _lowerCAmelCase : Optional[Any] = last_hidden_size _lowerCAmelCase : Optional[int] = num_attention_heads _lowerCAmelCase : int = hidden_act _lowerCAmelCase : Dict = conv_kernel_size _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : Any = classifier_dropout_prob _lowerCAmelCase : str = use_labels _lowerCAmelCase : List[str] = is_training _lowerCAmelCase : List[str] = num_labels _lowerCAmelCase : Any = initializer_range _lowerCAmelCase : Dict = scope def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase : Any = self.get_config() return config, pixel_values, labels, pixel_labels def a ( self ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : 
str = MobileViTModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : str = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.num_labels _lowerCAmelCase : Tuple = MobileViTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = self.num_labels _lowerCAmelCase : List[Any] = MobileViTForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase : Tuple = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs _lowerCAmelCase : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = MobileViTModelTester(self ) _lowerCAmelCase : List[Any] = MobileViTConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Any = model_class(snake_case__ ) _lowerCAmelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()] _lowerCAmelCase : str = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Dict = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : List[str] = outputs.hidden_states _lowerCAmelCase : Union[str, Any] = 5 self.assertEqual(len(snake_case__ ) , snake_case__ ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _lowerCAmelCase : Tuple = 2 for i in range(len(snake_case__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : str = MobileViTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = self.default_image_processor _lowerCAmelCase : Dict = prepare_img() _lowerCAmelCase : List[Any] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Optional[int] = model(**snake_case__ ) # verify the logits _lowerCAmelCase : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : str = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) _lowerCAmelCase : Any = 
model.to(snake_case__ ) _lowerCAmelCase : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) _lowerCAmelCase : List[Any] = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : List[str] = model(**snake_case__ ) _lowerCAmelCase : Any = outputs.logits # verify the logits _lowerCAmelCase : Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , snake_case__ ) _lowerCAmelCase : Union[str, Any] = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) _lowerCAmelCase : Optional[int] = model.to(snake_case__ ) _lowerCAmelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) _lowerCAmelCase : Optional[Any] = prepare_img() _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Any = model(**snake_case__ ) _lowerCAmelCase : List[str] = outputs.logits.detach().cpu() _lowerCAmelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] ) _lowerCAmelCase : Any = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , snake_case__ ) _lowerCAmelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , snake_case__ )
25
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class UpperCamelCase__ :
    """simple docstring"""

    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = True
    __magic_name__ = None
    __magic_name__ = 1
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None

    def a ( self ):
        '''simple docstring'''
        # deep-copy each attribute value: the comprehension variable is `v`,
        # so `copy.deepcopy(v )` must be passed, not the undefined `snake_case__`
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
25
1
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "gptj" __magic_name__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = vocab_size _lowerCAmelCase : Union[str, Any] = n_positions _lowerCAmelCase : Optional[Any] = n_embd _lowerCAmelCase : Optional[int] = n_layer _lowerCAmelCase : Dict = n_head _lowerCAmelCase : Optional[int] = n_inner _lowerCAmelCase : Dict = rotary_dim _lowerCAmelCase : List[Any] = activation_function _lowerCAmelCase : Tuple = resid_pdrop _lowerCAmelCase : Tuple = embd_pdrop _lowerCAmelCase : Optional[Any] = attn_pdrop _lowerCAmelCase : List[str] = layer_norm_epsilon _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : Optional[int] = use_cache _lowerCAmelCase : Any = bos_token_id _lowerCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , 'pad_token_id' , snake_case__ ): # TODO: how to do that better? 
_lowerCAmelCase : Optional[Any] = 0 @property def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='inputs' ) _lowerCAmelCase : Tuple = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'} return common_inputs @property def a ( self ): '''simple docstring''' return self._config.n_layer @property def a ( self ): '''simple docstring''' return self._config.n_head def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : List[str] = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Tuple = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCAmelCase , _lowerCAmelCase : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCAmelCase : Tuple = seqlen + 2 _lowerCAmelCase : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : List[Any] = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : Dict = common_inputs['attention_mask'] if self.use_past: _lowerCAmelCase : Tuple = ordered_inputs['attention_mask'].dtype _lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def a ( self ): '''simple docstring''' return 13
25
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
25
1
'''simple docstring'''
def ugly_numbers (_A ):
    """simple docstring"""
    # the three pointers and the three next candidates must be distinct variables;
    # the original collapsed them all into a single name, which broke the algorithm
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , _A ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(F'''{ugly_numbers(2_00) = }''')
25
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


lowerCAmelCase : Union[str, Any] = {
    """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Dict = [
        """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ResNetForImageClassification""",
        """ResNetModel""",
        """ResNetPreTrainedModel""",
        """ResNetBackbone""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : str = [
        """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFResNetForImageClassification""",
        """TFResNetModel""",
        """TFResNetPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Optional[Any] = [
        """FlaxResNetForImageClassification""",
        """FlaxResNetModel""",
        """FlaxResNetPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
1
'''simple docstring'''
# NOTE: function and local names below are restored from their call sites so the module actually runs;
# the two string parameters of compare_string/is_for_table carry the placeholder names stringa/stringb.
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string (stringa , stringb ):
    """simple docstring"""
    lista = list(stringa )
    listb = list(stringb )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(lista )


def check (binary ):
    """simple docstring"""
    pi = []
    while True:
        checka = ['$'] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    checka[i] = '*'
                    checka[j] = '*'
                    temp.append('X' )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )


def decimal_to_binary (no_of_variable , minterms ):
    """simple docstring"""
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp


def is_for_table (stringa , stringb , count ):
    """simple docstring"""
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count


def selection (chart , prime_implicants ):
    """simple docstring"""
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0


def prime_implicant_chart (prime_implicants , binary ):
    """simple docstring"""
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count('_' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart


def main ():
    """simple docstring"""
    no_of_variable = int(input('Enter the no. of variables\n' ) )
    minterms = [
        float(x )
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n'
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )

    prime_implicants = check(binary )
    print('Prime Implicants are:' )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )

    essential_prime_implicants = selection(chart , prime_implicants )
    print('Essential Prime Implicants are:' )
    print(essential_prime_implicants )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
25
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowerCAmelCase : List[Any] = logging.get_logger(__name__)

lowerCAmelCase : Tuple = {
    """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = "nat"

    __magic_name__ = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(**snake_case__ )

        _lowerCAmelCase : Union[str, Any] = patch_size
        _lowerCAmelCase : List[str] = num_channels
        _lowerCAmelCase : Tuple = embed_dim
        _lowerCAmelCase : Any = depths
        _lowerCAmelCase : Dict = len(snake_case__ )
        _lowerCAmelCase : str = num_heads
        _lowerCAmelCase : Dict = kernel_size
        _lowerCAmelCase : Union[str, Any] = mlp_ratio
        _lowerCAmelCase : int = qkv_bias
        _lowerCAmelCase : Optional[Any] = hidden_dropout_prob
        _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
        _lowerCAmelCase : List[str] = drop_path_rate
        _lowerCAmelCase : Union[str, Any] = hidden_act
        _lowerCAmelCase : Tuple = layer_norm_eps
        _lowerCAmelCase : Dict = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
        _lowerCAmelCase : Any = layer_scale_init_value
        _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
        _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
            out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names
        )
25
1
'''simple docstring''' from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RobertaConfig __magic_name__ = "roberta" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ ) _lowerCAmelCase : Union[str, Any] = RobertaEmbeddings(snake_case__ ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RobertaConfig __magic_name__ = "roberta" def __init__( self , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ ) _lowerCAmelCase : Tuple = config.num_labels _lowerCAmelCase : Any = config.num_hidden_layers _lowerCAmelCase : int = DeeRobertaModel(snake_case__ ) _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob ) _lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(snake_case__ ) def a ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=-1 , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = self.num_layers try: _lowerCAmelCase : str = self.roberta( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , position_ids=snake_case__ , head_mask=snake_case__ , inputs_embeds=snake_case__ , ) _lowerCAmelCase : List[Any] = outputs[1] _lowerCAmelCase : Tuple = self.dropout(snake_case__ ) _lowerCAmelCase : Optional[int] = self.classifier(snake_case__ ) _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase : List[Any] = e.message _lowerCAmelCase : Dict = e.exit_layer _lowerCAmelCase : List[Any] = outputs[0] if not self.training: _lowerCAmelCase : Union[str, Any] = entropy(snake_case__ ) _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : Any = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Optional[Any] = MSELoss() _lowerCAmelCase : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowerCAmelCase : Dict = CrossEntropyLoss() _lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowerCAmelCase : Union[str, Any] = [] for highway_exit in outputs[-1]: _lowerCAmelCase : Optional[int] = highway_exit[0] if not self.training: highway_logits_all.append(snake_case__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Any = MSELoss() _lowerCAmelCase : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowerCAmelCase : Dict = CrossEntropyLoss() 
_lowerCAmelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(snake_case__ ) if train_highway: _lowerCAmelCase : str = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowerCAmelCase : Dict = (loss,) + outputs if not self.training: _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase : Any = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
'''simple docstring''' import os import sys import unittest lowerCAmelCase : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCAmelCase : Dict = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") lowerCAmelCase : Optional[int] = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = get_test_to_tester_mapping(snake_case__ ) _lowerCAmelCase : str = get_test_to_tester_mapping(snake_case__ ) _lowerCAmelCase : Union[str, Any] = {'BertModelTest': 'BertModelTester'} _lowerCAmelCase : Optional[int] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(snake_case__ ) , snake_case__ ) self.assertEqual(get_test_info.to_json(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = get_model_to_test_mapping(snake_case__ ) _lowerCAmelCase : Dict = get_model_to_test_mapping(snake_case__ ) _lowerCAmelCase : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } _lowerCAmelCase : Tuple = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(snake_case__ ) , snake_case__ ) self.assertEqual(get_test_info.to_json(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = get_model_to_tester_mapping(snake_case__ ) _lowerCAmelCase : str = get_model_to_tester_mapping(snake_case__ ) _lowerCAmelCase : Dict = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } _lowerCAmelCase : List[str] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(snake_case__ ) , snake_case__ ) self.assertEqual(get_test_info.to_json(snake_case__ ) , snake_case__ )
25
'''simple docstring''' lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag. lowerCAmelCase : Optional[int] = 1 # The second color of the flag. lowerCAmelCase : int = 2 # The third color of the flag. lowerCAmelCase : Any = (red, white, blue) def lowercase (_A ): """simple docstring""" if not sequence: return [] if len(_A ) == 1: return list(_A ) _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : List[str] = len(_A ) - 1 _lowerCAmelCase : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid] high -= 1 else: _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values' raise ValueError(_A ) return sequence if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip() lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")] print(F'''{dutch_national_flag_sort(unsorted)}''')
25
1
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = 42 __magic_name__ = None __magic_name__ = None lowerCAmelCase : Any = namedtuple("""CoinsDistribResult""", """moves excess""") def lowercase (_A ): """simple docstring""" if root is None: return 0 # Validation def count_nodes(_A ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_A ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_A ) != count_coins(_A ): raise ValueError('The number of nodes should be the same as the number of coins' ) # Main calculation def get_distrib(_A ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_distrib(node.left ) _lowerCAmelCase , _lowerCAmelCase : int = get_distrib(node.right ) _lowerCAmelCase : int = 1 - left_distrib_excess _lowerCAmelCase : int = 1 - right_distrib_excess _lowerCAmelCase : str = ( left_distrib_moves + right_distrib_moves + abs(_A ) + abs(_A ) ) _lowerCAmelCase : Any = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_A , _A ) return get_distrib(_A )[0] if __name__ == "__main__": import doctest doctest.testmod()
25
'''simple docstring''' def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1] _lowerCAmelCase : int = 6 _lowerCAmelCase : Dict = 1 _lowerCAmelCase : Optional[int] = 1_9_0_1 _lowerCAmelCase : Optional[Any] = 0 while year < 2_0_0_1: day += 7 if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] elif day > 2_9 and month == 2: month += 1 _lowerCAmelCase : List[str] = day - 2_9 else: if day > days_per_month[month - 1]: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] if month > 1_2: year += 1 _lowerCAmelCase : Optional[int] = 1 if year < 2_0_0_1 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
25
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "dpt" def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=384 , snake_case__=16 , snake_case__=3 , snake_case__=False , snake_case__=True , snake_case__=[2, 5, 8, 11] , snake_case__="project" , snake_case__=[4, 2, 1, 0.5] , snake_case__=[96, 192, 384, 768] , snake_case__=256 , snake_case__=-1 , snake_case__=False , snake_case__=True , snake_case__=0.4 , snake_case__=255 , snake_case__=0.1 , snake_case__=[1, 1024, 24, 24] , snake_case__=[0, 1] , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[Any] = hidden_size _lowerCAmelCase : Any = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) _lowerCAmelCase : Union[str, Any] = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } _lowerCAmelCase : List[Any] = BitConfig(**snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): logger.info('Initializing the config with a `BiT` backbone.' ) _lowerCAmelCase : str = BitConfig(**snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[Any] = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) _lowerCAmelCase : Union[str, Any] = backbone_featmap_shape _lowerCAmelCase : List[Any] = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' 
) else: _lowerCAmelCase : Union[str, Any] = None _lowerCAmelCase : Dict = None _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : Dict = num_hidden_layers _lowerCAmelCase : Tuple = num_attention_heads _lowerCAmelCase : List[str] = intermediate_size _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = hidden_dropout_prob _lowerCAmelCase : List[str] = attention_probs_dropout_prob _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : List[str] = layer_norm_eps _lowerCAmelCase : Dict = image_size _lowerCAmelCase : Tuple = patch_size _lowerCAmelCase : Tuple = num_channels _lowerCAmelCase : List[str] = qkv_bias _lowerCAmelCase : int = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) _lowerCAmelCase : int = readout_type _lowerCAmelCase : Dict = reassemble_factors _lowerCAmelCase : Optional[Any] = neck_hidden_sizes _lowerCAmelCase : Tuple = fusion_hidden_size _lowerCAmelCase : Optional[int] = head_in_index _lowerCAmelCase : Union[str, Any] = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _lowerCAmelCase : Optional[Any] = use_auxiliary_head _lowerCAmelCase : List[Any] = auxiliary_loss_weight _lowerCAmelCase : List[str] = semantic_loss_ignore_index _lowerCAmelCase : List[Any] = semantic_classifier_dropout def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _lowerCAmelCase : Optional[int] = self.backbone_config.to_dict() _lowerCAmelCase : Tuple = self.__class__.model_type return output
25
'''simple docstring''' def lowercase (_A = 1_0_0_0_0_0_0 ): """simple docstring""" _lowerCAmelCase : Any = set(range(3 , _A , 2 ) ) primes.add(2 ) for p in range(3 , _A , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , _A , _A ) ) ) _lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )] for p in primes: for n in range(_A , limit + 1 , _A ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
25
1
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
25
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Split the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
25
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Any = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "wavlm" def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1E-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(512, 512, 512, 512, 512, 512, 512) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=320 , snake_case__=800 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=320 , snake_case__=2 , snake_case__=0.1 , snake_case__=100 , snake_case__=256 , snake_case__=256 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=(512, 512, 512, 512, 1500) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=512 , snake_case__=80 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=3 , snake_case__=2 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) _lowerCAmelCase : str = hidden_size _lowerCAmelCase : List[Any] = feat_extract_norm _lowerCAmelCase : List[Any] = feat_extract_activation _lowerCAmelCase : Dict = list(snake_case__ ) _lowerCAmelCase : List[Any] = list(snake_case__ ) _lowerCAmelCase : Tuple = list(snake_case__ ) _lowerCAmelCase : Any = conv_bias _lowerCAmelCase : Optional[int] = num_buckets _lowerCAmelCase : Optional[int] = max_bucket_distance _lowerCAmelCase : int = num_conv_pos_embeddings _lowerCAmelCase : Optional[int] = num_conv_pos_embedding_groups _lowerCAmelCase : str = len(self.conv_dim ) _lowerCAmelCase : Dict = num_hidden_layers _lowerCAmelCase : List[str] = intermediate_size _lowerCAmelCase : List[str] = hidden_act _lowerCAmelCase : Dict = num_attention_heads _lowerCAmelCase : int = hidden_dropout _lowerCAmelCase : Any = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Any = feat_proj_dropout _lowerCAmelCase : Dict = final_dropout _lowerCAmelCase : List[Any] = layerdrop _lowerCAmelCase : List[str] = layer_norm_eps _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : Tuple = num_ctc_classes _lowerCAmelCase : List[str] = vocab_size _lowerCAmelCase : List[Any] = do_stable_layer_norm _lowerCAmelCase : Optional[Any] = use_weighted_layer_sum _lowerCAmelCase : List[str] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : str = apply_spec_augment _lowerCAmelCase : Any = mask_time_prob _lowerCAmelCase : Optional[int] = mask_time_length _lowerCAmelCase : Union[str, Any] = mask_time_min_masks _lowerCAmelCase : Tuple = mask_feature_prob _lowerCAmelCase : List[str] = mask_feature_length # parameters for pretraining with codevector quantized representations _lowerCAmelCase : Any = num_codevectors_per_group _lowerCAmelCase : str = num_codevector_groups _lowerCAmelCase : Dict = contrastive_logits_temperature _lowerCAmelCase : List[str] = num_negatives _lowerCAmelCase : Optional[int] = codevector_dim _lowerCAmelCase : int = proj_codevector_dim _lowerCAmelCase : Dict = diversity_loss_weight # ctc loss _lowerCAmelCase : Tuple = ctc_loss_reduction _lowerCAmelCase : Optional[int] = ctc_zero_infinity # adapter _lowerCAmelCase : Tuple = add_adapter _lowerCAmelCase : Optional[Any] = adapter_kernel_size _lowerCAmelCase : Union[str, Any] = adapter_stride _lowerCAmelCase : Any = num_adapter_layers _lowerCAmelCase : List[str] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : List[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : Tuple = list(snake_case__ ) _lowerCAmelCase : List[Any] = list(snake_case__ ) _lowerCAmelCase : List[str] = list(snake_case__ ) _lowerCAmelCase : str = xvector_output_dim @property def a ( self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[Any] = SwinvaConfig() _lowerCAmelCase : Any = swinva_name.split('_' ) _lowerCAmelCase : Dict = name_split[1] if "to" in name_split[3]: _lowerCAmelCase : Tuple = int(name_split[3][-3:] ) else: _lowerCAmelCase : Any = int(name_split[3] ) if "to" in name_split[2]: _lowerCAmelCase : int = int(name_split[2][-2:] ) else: _lowerCAmelCase : int = int(name_split[2][6:] ) if model_size == "tiny": _lowerCAmelCase : Union[str, Any] = 9_6 _lowerCAmelCase : List[str] = (2, 2, 6, 2) _lowerCAmelCase : str = (3, 6, 1_2, 2_4) elif model_size == "small": _lowerCAmelCase : str = 9_6 _lowerCAmelCase : List[str] = (2, 2, 1_8, 2) _lowerCAmelCase : List[str] = (3, 6, 1_2, 2_4) elif model_size == "base": _lowerCAmelCase : int = 1_2_8 _lowerCAmelCase : Any = (2, 2, 1_8, 2) _lowerCAmelCase : str = (4, 8, 1_6, 3_2) else: _lowerCAmelCase : Union[str, Any] = 1_9_2 _lowerCAmelCase : Dict = (2, 2, 1_8, 2) _lowerCAmelCase : List[Any] = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: _lowerCAmelCase : List[Any] = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): _lowerCAmelCase : List[str] = 2_1_8_4_1 _lowerCAmelCase : List[Any] = 'huggingface/label-files' _lowerCAmelCase : str = 'imagenet-22k-id2label.json' _lowerCAmelCase : int = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) _lowerCAmelCase : Optional[Any] = {int(_A ): v for k, v in idalabel.items()} _lowerCAmelCase : List[str] = idalabel _lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _lowerCAmelCase : List[Any] = 1_0_0_0 _lowerCAmelCase : Any = 'huggingface/label-files' _lowerCAmelCase : Union[str, Any] = 'imagenet-1k-id2label.json' _lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) _lowerCAmelCase : Optional[Any] = {int(_A ): v for k, v in idalabel.items()} _lowerCAmelCase : Tuple = idalabel _lowerCAmelCase : Any = {v: k for k, v in idalabel.items()} _lowerCAmelCase : int = img_size _lowerCAmelCase : int = num_classes _lowerCAmelCase : Dict = embed_dim _lowerCAmelCase : Optional[Any] = depths _lowerCAmelCase : List[Any] = num_heads _lowerCAmelCase : List[Any] = window_size return config def lowercase (_A ): """simple docstring""" if "patch_embed.proj" in name: _lowerCAmelCase : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowerCAmelCase : List[str] = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: _lowerCAmelCase : str = 'encoder.' 
+ name if "attn.proj" in name: _lowerCAmelCase : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: _lowerCAmelCase : Tuple = name.replace('attn' , 'attention.self' ) if "norm1" in name: _lowerCAmelCase : List[Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _lowerCAmelCase : List[Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _lowerCAmelCase : Any = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _lowerCAmelCase : List[Any] = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: _lowerCAmelCase : List[Any] = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: _lowerCAmelCase : List[Any] = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: _lowerCAmelCase : List[Any] = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: _lowerCAmelCase : Tuple = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": _lowerCAmelCase : str = 'layernorm.weight' if name == "norm.bias": _lowerCAmelCase : int = 'layernorm.bias' if "head" in name: _lowerCAmelCase : Tuple = name.replace('head' , 'classifier' ) else: _lowerCAmelCase : Optional[int] = 'swinv2.' + name return name def lowercase (_A , _A ): """simple docstring""" for key in orig_state_dict.copy().keys(): _lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_A ) if "mask" in key: continue elif "qkv" in key: _lowerCAmelCase : Union[str, Any] = key.split('.' ) _lowerCAmelCase : List[str] = int(key_split[1] ) _lowerCAmelCase : Optional[Any] = int(key_split[3] ) _lowerCAmelCase : Optional[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _lowerCAmelCase : List[Any] = val[:dim, :] _lowerCAmelCase : List[str] = val[dim : dim * 2, :] _lowerCAmelCase : Union[str, Any] = val[-dim:, :] else: _lowerCAmelCase : List[Any] = val[:dim] _lowerCAmelCase : Union[str, Any] = val[ dim : dim * 2 ] _lowerCAmelCase : Tuple = val[-dim:] else: _lowerCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Optional[int] = timm.create_model(_A , pretrained=_A ) timm_model.eval() _lowerCAmelCase : Any = get_swinva_config(_A ) _lowerCAmelCase : Union[str, Any] = SwinvaForImageClassification(_A ) model.eval() _lowerCAmelCase : Any = convert_state_dict(timm_model.state_dict() , _A ) model.load_state_dict(_A ) _lowerCAmelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) _lowerCAmelCase : List[Any] = Image.open(requests.get(_A , stream=_A ).raw ) _lowerCAmelCase : int = image_processor(images=_A , return_tensors='pt' ) _lowerCAmelCase : Any = timm_model(inputs['pixel_values'] ) _lowerCAmelCase : Dict = model(**_A ).logits assert torch.allclose(_A , _A , atol=1E-3 ) print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_A ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(_A ) model.push_to_hub( repo_path_or_name=Path(_A , _A ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) 
parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowerCAmelCase : str = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
25
'''simple docstring''' from __future__ import annotations from typing import Any def lowercase (_A ): """simple docstring""" if not postfix_notation: return 0 _lowerCAmelCase : int = {'+', '-', '*', '/'} _lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_A ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
25
1
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = tempfile.mkdtemp() _lowerCAmelCase : Dict = 8 # DPR tok _lowerCAmelCase : Tuple = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _lowerCAmelCase : Dict = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) _lowerCAmelCase : List[str] = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok _lowerCAmelCase : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _lowerCAmelCase : List[str] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _lowerCAmelCase : Tuple = {'unk_token': '<unk>'} _lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) _lowerCAmelCase : List[str] = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : str = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) def a ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def a ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def a ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) @require_tokenizers def a ( self ): '''simple docstring''' _lowerCAmelCase : str = os.path.join(self.tmpdirname , 'rag_tokenizer' ) _lowerCAmelCase : List[str] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) _lowerCAmelCase : Dict = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(snake_case__ ) rag_tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Dict = RagTokenizer.from_pretrained(snake_case__ , config=snake_case__ ) 
self.assertIsInstance(new_rag_tokenizer.question_encoder , snake_case__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , snake_case__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) _lowerCAmelCase : Tuple = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] _lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) _lowerCAmelCase : Any = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] _lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ ) self.assertIsNotNone(snake_case__ )
25
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
1
'''simple docstring''' from __future__ import annotations class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = data _lowerCAmelCase : Node | None = None _lowerCAmelCase : Node | None = None def lowercase (_A ): # In Order traversal of the tree """simple docstring""" if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase (_A ): """simple docstring""" return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase (_A ): """simple docstring""" if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase (): # Main function for testing. """simple docstring""" _lowerCAmelCase : List[Any] = Node(1 ) _lowerCAmelCase : Tuple = Node(2 ) _lowerCAmelCase : str = Node(3 ) _lowerCAmelCase : Tuple = Node(4 ) _lowerCAmelCase : Union[str, Any] = Node(5 ) _lowerCAmelCase : Optional[int] = Node(6 ) _lowerCAmelCase : Tuple = Node(7 ) _lowerCAmelCase : List[str] = Node(8 ) _lowerCAmelCase : int = Node(9 ) print(is_full_binary_tree(_A ) ) print(depth_of_tree(_A ) ) print('Tree is: ' ) display(_A ) if __name__ == "__main__": main()
25
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase__ : """simple docstring""" def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return None class UpperCamelCase__ : """simple docstring""" def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return None class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def a ( self ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case__ , 'tf' , 12 , **snake_case__ ) @require_torch @slow def a ( self ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case__ , 'pt' , 12 , **snake_case__ ) @require_torch @slow def a ( self ): '''simple docstring''' from transformers import BertModel _lowerCAmelCase : List[Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(snake_case__ ) ) vocab_file.flush() _lowerCAmelCase : List[Any] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: _lowerCAmelCase : List[Any] = BertModel(BertConfig(vocab_size=len(snake_case__ ) ) ) model.save_pretrained(snake_case__ ) self._test_export(snake_case__ , 'pt' , 12 , snake_case__ ) @require_tf @slow def a ( self ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _lowerCAmelCase : int = self._test_export(snake_case__ , 'tf' , 12 , **snake_case__ ) _lowerCAmelCase : List[Any] = quantize(Path(snake_case__ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case__ ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def a ( self ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _lowerCAmelCase : Dict = self._test_export(snake_case__ , 'pt' , 12 , **snake_case__ ) _lowerCAmelCase : Union[str, Any] = quantize(snake_case__ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case__ ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ): '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: _lowerCAmelCase : int = Path(snake_case__ ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ) return path except Exception as e: self.fail(snake_case__ ) @require_torch @require_tokenizers @slow def a ( self ): '''simple docstring''' from transformers import BertModel _lowerCAmelCase : List[Any] = 
BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) _lowerCAmelCase : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(snake_case__ , snake_case__ , 'pt' ) @require_tf @require_tokenizers @slow def a ( self ): '''simple docstring''' from transformers import TFBertModel _lowerCAmelCase : List[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) _lowerCAmelCase : List[str] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(snake_case__ , snake_case__ , 'tf' ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = FeatureExtractionPipeline(snake_case__ , snake_case__ ) _lowerCAmelCase : str = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = infer_shapes(snake_case__ , snake_case__ ) # Assert all variables are present self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , snake_case__ ) self.assertSequenceEqual(variable_names[3:] , snake_case__ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] , {0: 'batch'} ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids'] _lowerCAmelCase : Any = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = ensure_valid_input(FuncContiguousArgs() , snake_case__ , snake_case__ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(snake_case__ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(snake_case__ ) , set(snake_case__ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(snake_case__ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , snake_case__ , snake_case__ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(snake_case__ ) , 1 ) self.assertEqual(len(snake_case__ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] , 'input_ids' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
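# --- Usage sketch (editor addition, not part of the original test file) ---
# A minimal, hedged example of the export + quantization flow the tests above
# exercise. Argument order follows transformers.convert_graph_to_onnx.convert
# (framework, model, output, opset, tokenizer); the checkpoint name is only an
# illustrative choice.
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

export_path = Path("onnx/bert-base-cased.onnx")
convert("pt", "bert-base-cased", export_path, 12)  # export the PyTorch graph at opset 12
quantized_export_path = quantize(export_path)  # returns the path of the dynamically quantized copy
assert quantized_export_path.stat().st_size < export_path.stat().st_size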
25
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
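# --- Usage sketch (editor addition; illustrative, not from the original module) ---
# Driving the src_lang/tgt_lang machinery defined above: the language codes come
# from FAIRSEQ_LANGUAGE_CODES and the checkpoint mirrors PRETRAINED_VOCAB_FILES_MAP.
from transformers import NllbTokenizerFast

nllb_tokenizer = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
example_batch = nllb_tokenizer("The cat sleeps.", text_target="Le chat dort.", return_tensors="pt")
# With legacy_behaviour=False (the default here), input_ids are prefixed with the
# eng_Latn code token and suffixed with </s>; labels are prefixed with fra_Latn instead.
print(example_batch["input_ids"])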
25
1
from math import isqrt


def is_prime(number: int) -> bool:
    # A number is prime when no divisor in [2, isqrt(number)] divides it evenly.
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    # Count primes below max_prime among the candidates 7, 7 + 12, 7 + 12 + 18, ...
    # (each step adds 6 * cube_index, mirroring the original loop).
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
25
import argparse
import importlib
from pathlib import Path

# Usage: python this_script.py [--check_lib]
# Test all the custom extension files added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Every custom file must exist under the given transformers path.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : List[Any] = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
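# --- Note (editor addition): the block above is the standard transformers
# lazy-import pattern. `_import_structure` maps submodules to their public
# symbols, each try/except OptionalDependencyNotAvailable guard only registers a
# backend's classes when that backend (sentencepiece, tokenizers, torch, flax,
# tf) is installed, and `_LazyModule` defers the real imports until an attribute
# of the package is first accessed.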
25
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in O(n).

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_input_string find the corresponding palindrome
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
1
import math

import numpy as np
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> dict:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation between qubit j (control) and qubit `counter` (target)
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}")
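# --- Note (editor addition): for the default |000> input state, the QFT output
# is a uniform superposition, so the 10000 measured shots should split roughly
# evenly (about 1250 each) across the 8 possible bitstrings in the counts dict.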
25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
1
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
25
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "trajectory_transformer" __magic_name__ = ["past_key_values"] __magic_name__ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = action_weight _lowerCAmelCase : Optional[int] = reward_weight _lowerCAmelCase : Union[str, Any] = value_weight _lowerCAmelCase : List[str] = max_position_embeddings _lowerCAmelCase : Tuple = block_size _lowerCAmelCase : List[Any] = action_dim _lowerCAmelCase : List[Any] = observation_dim _lowerCAmelCase : Union[str, Any] = transition_dim _lowerCAmelCase : Tuple = learning_rate _lowerCAmelCase : int = n_layer _lowerCAmelCase : Any = n_head _lowerCAmelCase : Tuple = n_embd _lowerCAmelCase : Optional[Any] = embd_pdrop _lowerCAmelCase : Union[str, Any] = attn_pdrop _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range _lowerCAmelCase : List[Any] = use_cache super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
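# --- Note (editor addition, hedged): the defaults above are consistent with the
# HalfCheetah environment — transition_dim (25) = observation_dim (17) +
# action_dim (6) + 1 reward slot + 1 value slot per flattened timestep.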
25
1
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = IFPipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = PipelineTesterMixin.required_optional_params - {"latents"} def a ( self ): '''simple docstring''' return self._get_dummy_components() def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Tuple = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def a ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def a ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def a ( self ): '''simple docstring''' self._test_save_load_local() def a ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def a ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa ) _lowerCAmelCase : Any = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) _lowerCAmelCase , _lowerCAmelCase : List[Any] = pipe_a.encode_prompt('anime turtle' , device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCAmelCase : List[str] = None _lowerCAmelCase : Tuple = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() 
pipe_a.remove_all_hooks() # img2img _lowerCAmelCase : List[str] = IFImgaImgPipeline(**pipe_a.components ) _lowerCAmelCase : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCAmelCase : int = IFInpaintingPipeline(**pipe_a.components ) _lowerCAmelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _start_torch_memory_measurement() _lowerCAmelCase : Tuple = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCAmelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCAmelCase : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() _lowerCAmelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : List[Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCAmelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCAmelCase : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _start_torch_memory_measurement() _lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type='np' , ) _lowerCAmelCase : Optional[Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() _lowerCAmelCase : Tuple = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : 
Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCAmelCase : int = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCAmelCase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _start_torch_memory_measurement() _lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case__ ) _lowerCAmelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : Any = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type='np' , ) _lowerCAmelCase : Optional[Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCAmelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCAmelCase : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() _lowerCAmelCase : Dict = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case__ ) _lowerCAmelCase : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case__ ) _lowerCAmelCase : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type='np' , ) _lowerCAmelCase : List[str] = output.images[0] assert image.shape == (256, 256, 3) _lowerCAmelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCAmelCase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def lowercase (): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
25
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
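# --- Usage sketch (editor addition; note that the "MBartaa*" names in this dump
# correspond to the real MBart50* classes). A minimal one-to-many translation
# setup mirroring the integration test above:
from transformers import MBart50TokenizerFast

mbart50_tokenizer = MBart50TokenizerFast.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
mbart50_batch = mbart50_tokenizer(
    "UN Chief Says There Is No Military Solution in Syria", return_tensors="pt"
)
# input_ids start with the en_XX language-code token and end with </s>, matching
# the prefix_tokens/suffix_tokens assertions above.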
25
1
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase : Any = 16 lowerCAmelCase : Optional[Any] = 32 def lowercase (_A , _A = 1_6 ): """simple docstring""" _lowerCAmelCase : str = AutoTokenizer.from_pretrained('bert-base-cased' ) _lowerCAmelCase : str = load_dataset('glue' , 'mrpc' ) def tokenize_function(_A ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_A , max_length=_A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCAmelCase : Dict = datasets.map( _A , batched=_A , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase : List[Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_A ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCAmelCase : List[str] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCAmelCase : int = 1_6 elif accelerator.mixed_precision != "no": _lowerCAmelCase : List[Any] = 8 else: _lowerCAmelCase : Union[str, Any] = None return tokenizer.pad( _A , padding='longest' , max_length=_A , pad_to_multiple_of=_A , return_tensors='pt' , ) # Instantiate dataloaders. 
_lowerCAmelCase : Dict = DataLoader( tokenized_datasets['train'] , shuffle=_A , collate_fn=_A , batch_size=_A ) _lowerCAmelCase : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=_A , collate_fn=_A , batch_size=_A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase : Any = mocked_dataloaders # noqa: F811 def lowercase (_A , _A ): """simple docstring""" if os.environ.get('TESTING_MOCKED_DATALOADERS' , _A ) == "1": _lowerCAmelCase : Any = 2 # Initialize accelerator _lowerCAmelCase : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase : str = config['lr'] _lowerCAmelCase : int = int(config['num_epochs'] ) _lowerCAmelCase : Optional[Any] = int(config['seed'] ) _lowerCAmelCase : List[Any] = int(config['batch_size'] ) _lowerCAmelCase : Optional[int] = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation _lowerCAmelCase : Optional[int] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _lowerCAmelCase : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE _lowerCAmelCase : Optional[Any] = MAX_GPU_BATCH_SIZE set_seed(_A ) _lowerCAmelCase , _lowerCAmelCase : Dict = get_dataloaders(_A , _A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase : Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase : Any = AdamW(params=model.parameters() , lr=_A ) # Instantiate scheduler _lowerCAmelCase : List[str] = get_linear_schedule_with_warmup( optimizer=_A , num_warmup_steps=1_0_0 , num_training_steps=(len(_A ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = accelerator.prepare( _A , _A , _A , _A , _A ) # Now we train the model for epoch in range(_A ): model.train() for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _lowerCAmelCase : Dict = model(**_A ) _lowerCAmelCase : str = outputs.loss _lowerCAmelCase : str = loss / gradient_accumulation_steps accelerator.backward(_A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() _lowerCAmelCase : List[str] = 0 for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase : int = model(**_A ) _lowerCAmelCase : int = outputs.logits.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase : int = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(_A ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples _lowerCAmelCase : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCAmelCase : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=_A , references=_A , ) _lowerCAmelCase : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _A ) def lowercase (): """simple docstring""" _lowerCAmelCase : List[str] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_A , default=_A , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _lowerCAmelCase : List[str] = parser.parse_args() _lowerCAmelCase : int = {'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(_A , _A ) if __name__ == "__main__": main()
25
'''simple docstring'''
from math import isqrt


def lowercase (_A ):
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )


def lowercase (_A = 1_0**6 ):
    """simple docstring"""
    _lowerCAmelCase : str = 0
    _lowerCAmelCase : str = 1
    _lowerCAmelCase : List[str] = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(_A )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
25
1
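A note on the evaluation loop in the Accelerate script above: distributed samplers pad the last batch so every process receives the same number of samples, which is why the gathered predictions and references are truncated on the final step. A minimal, dependency-free sketch of that bookkeeping (the sample indices and batch sizes are hypothetical):

# Hypothetical illustration: 2 processes, a 5-sample eval set, per-process
# batch size 2. The sampler pads to 6 samples, so the last gathered batch
# wraps around and repeats sample 0, which must be dropped before metrics.
def truncate_last_batch(gathered, samples_seen, dataset_len):
    # Keep only the samples that actually belong to the dataset.
    return gathered[: dataset_len - samples_seen]

dataset_len = 5
gathered_batches = [[0, 1, 2, 3], [4, 0]]  # indices gathered across both processes
samples_seen = 0
kept = []
for step, gathered in enumerate(gathered_batches):
    if step == len(gathered_batches) - 1:
        gathered = truncate_last_batch(gathered, samples_seen, dataset_len)
    samples_seen += len(gathered)
    kept.extend(gathered)
assert kept == [0, 1, 2, 3, 4]

As the script's own comment notes, `Accelerator.gather_for_metrics` performs this truncation automatically.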
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase (_A ): """simple docstring""" if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(_A ): return ext raise Exception( f'Unable to determine file format from file extension {path}. ' f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' ) def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Tuple = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _lowerCAmelCase : List[Any] = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format _lowerCAmelCase : Dict = PipelineDataFormat.from_str( format=_A , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(_A , _A ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = nlp _lowerCAmelCase : List[str] = reader @staticmethod def a ( snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = parser.add_parser('run' , help='Run a pipeline through the CLI' ) run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' ) run_parser.add_argument('--input' , type=snake_case__ , help='Path to the file to use for inference' ) run_parser.add_argument('--output' , type=snake_case__ , help='Path to the file that will be used post to write results.' ) run_parser.add_argument('--model' , type=snake_case__ , help='Name or path to the model to instantiate.' ) run_parser.add_argument('--config' , type=snake_case__ , help='Name or path to the model\'s config to instantiate.' ) run_parser.add_argument( '--tokenizer' , type=snake_case__ , help='Name of the tokenizer to use. (default: same as the model name)' ) run_parser.add_argument( '--column' , type=snake_case__ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , ) run_parser.add_argument( '--format' , type=snake_case__ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , ) run_parser.add_argument( '--device' , type=snake_case__ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' ) run_parser.set_defaults(func=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Any = self._nlp, [] for entry in self._reader: _lowerCAmelCase : Optional[int] = nlp(**snake_case__ ) if self._reader.is_multi_columns else nlp(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): outputs.append(snake_case__ ) else: outputs += output # Saving data if self._nlp.binary_output: _lowerCAmelCase : str = self._reader.save_binary(snake_case__ ) logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' ) else: self._reader.save(snake_case__ )
25
'''simple docstring'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase : Any = logging.get_logger(__name__)

lowerCAmelCase : List[Any] = {
    """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = "mvp"
    __magic_name__ = ["past_key_values"]
    __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = vocab_size
        _lowerCAmelCase : Any = max_position_embeddings
        _lowerCAmelCase : Optional[Any] = d_model
        _lowerCAmelCase : Optional[int] = encoder_ffn_dim
        _lowerCAmelCase : Optional[int] = encoder_layers
        _lowerCAmelCase : Any = encoder_attention_heads
        _lowerCAmelCase : Any = decoder_ffn_dim
        _lowerCAmelCase : Optional[Any] = decoder_layers
        _lowerCAmelCase : int = decoder_attention_heads
        _lowerCAmelCase : Union[str, Any] = dropout
        _lowerCAmelCase : List[Any] = attention_dropout
        _lowerCAmelCase : List[str] = activation_dropout
        _lowerCAmelCase : Optional[Any] = activation_function
        _lowerCAmelCase : Any = init_std
        _lowerCAmelCase : Any = encoder_layerdrop
        _lowerCAmelCase : Union[str, Any] = decoder_layerdrop
        _lowerCAmelCase : Optional[int] = classifier_dropout
        _lowerCAmelCase : List[Any] = use_cache
        _lowerCAmelCase : Optional[int] = encoder_layers
        _lowerCAmelCase : Any = scale_embedding  # scale factor will be sqrt(d_model) if True
        _lowerCAmelCase : Optional[Any] = use_prompt
        _lowerCAmelCase : Optional[Any] = prompt_length
        _lowerCAmelCase : Any = prompt_mid_dim

        super().__init__(
            pad_token_id=snake_case__ ,
            bos_token_id=snake_case__ ,
            eos_token_id=snake_case__ ,
            is_encoder_decoder=snake_case__ ,
            decoder_start_token_id=snake_case__ ,
            forced_eos_token_id=snake_case__ ,
            **snake_case__ ,
        )

        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
            _lowerCAmelCase : Any = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
25
1
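The `run` command above is a thin CLI wrapper around `transformers.pipeline`; the same inference can be done programmatically. A minimal sketch (this downloads a default checkpoint on first use, and the printed score is illustrative, not an exact value):

from transformers import pipeline

classifier = pipeline(task="sentiment-analysis")
print(classifier("Transformers pipelines make inference simple."))
# e.g. [{'label': 'POSITIVE', 'score': 0.999...}]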
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : List[str] = { """configuration_roberta_prelayernorm""": [ """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaPreLayerNormConfig""", """RobertaPreLayerNormOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaPreLayerNormForCausalLM""", """RobertaPreLayerNormForMaskedLM""", """RobertaPreLayerNormForMultipleChoice""", """RobertaPreLayerNormForQuestionAnswering""", """RobertaPreLayerNormForSequenceClassification""", """RobertaPreLayerNormForTokenClassification""", """RobertaPreLayerNormModel""", """RobertaPreLayerNormPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaPreLayerNormForCausalLM""", """TFRobertaPreLayerNormForMaskedLM""", """TFRobertaPreLayerNormForMultipleChoice""", """TFRobertaPreLayerNormForQuestionAnswering""", """TFRobertaPreLayerNormForSequenceClassification""", """TFRobertaPreLayerNormForTokenClassification""", """TFRobertaPreLayerNormMainLayer""", """TFRobertaPreLayerNormModel""", """TFRobertaPreLayerNormPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxRobertaPreLayerNormForCausalLM""", """FlaxRobertaPreLayerNormForMaskedLM""", """FlaxRobertaPreLayerNormForMultipleChoice""", """FlaxRobertaPreLayerNormForQuestionAnswering""", """FlaxRobertaPreLayerNormForSequenceClassification""", """FlaxRobertaPreLayerNormForTokenClassification""", """FlaxRobertaPreLayerNormModel""", """FlaxRobertaPreLayerNormPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, 
FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
25
1
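`compute_intermediate_size` in the LLaMA conversion script above rounds the SwiGLU feed-forward width 8d/3 up to a multiple of `multiple_of`. Replicated standalone, it reproduces the hard-coded intermediate sizes in the table at the top of that file (assuming the standard LLaMA hidden sizes 4096/5120/6656/8192 for 7B/13B/30B/65B):

def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

# Matches the 7B/13B/30B/65B entries (11008, 13824, 17920, 22016) above.
for dim, expected in [(4096, 11008), (5120, 13824), (6656, 17920), (8192, 22016)]:
    assert compute_intermediate_size(dim) == expected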
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = 1 __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None def a ( self ): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
25
1
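The `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` methods in the RoBERTa tokenizer above encode the `<s> A </s></s> B </s>` pair layout with all-zero token type ids. A pure-Python trace of that logic (token ids 100/101/200 are hypothetical; 0 and 2 are RoBERTa's bos/cls and eos/sep ids):

bos_token_id, eos_token_id = 0, 2
cls, sep = [bos_token_id], [eos_token_id]
token_ids_a, token_ids_b = [100, 101], [200]

single = [bos_token_id] + token_ids_a + [eos_token_id]         # <s> A </s>
pair = single + [eos_token_id] + token_ids_b + [eos_token_id]  # <s> A </s></s> B </s>
type_ids = len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

assert pair == [0, 100, 101, 2, 2, 200, 2]
assert type_ids == [0] * len(pair)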
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0  # The first color of the flag.
lowerCAmelCase : Optional[int] = 1  # The second color of the flag.
lowerCAmelCase : int = 2  # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)


def lowercase (_A ):
    """simple docstring"""
    if not sequence:
        return []
    if len(_A ) == 1:
        return list(_A )
    _lowerCAmelCase : Optional[int] = 0
    _lowerCAmelCase : List[str] = len(_A ) - 1
    _lowerCAmelCase : Optional[Any] = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
            high -= 1
        else:
            _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(_A )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
    lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
    print(F'''{dutch_national_flag_sort(unsorted)}''')
25
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
25
1
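The Dutch national flag sort above is a single-pass three-way partition. A de-obfuscated sketch of the same loop, simplified to assume the inputs contain only the color constants 0, 1, 2 (so the error branch is omitted):

def three_way_partition(sequence, colors=(0, 1, 2)):
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        else:  # colors[2]
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
    return sequence

assert three_way_partition([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]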
'''simple docstring'''
def lowercase ():
    """simple docstring"""
    _lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    _lowerCAmelCase : int = 6
    _lowerCAmelCase : Dict = 1
    _lowerCAmelCase : Optional[int] = 1_9_0_1
    _lowerCAmelCase : Optional[Any] = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                _lowerCAmelCase : List[str] = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                _lowerCAmelCase : List[str] = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                _lowerCAmelCase : List[str] = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            _lowerCAmelCase : Optional[int] = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
25
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
1
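The `solution()` referenced in the `__main__` block above walks the calendar by hand; its count of first-of-month Sundays in 1901-2000 can be cross-checked with the standard library, which sidesteps the manual leap-year logic (in `datetime`, Monday is weekday 0, so Sunday is 6):

from datetime import date

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6
)
print(sundays)  # should agree with solution() above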
'''simple docstring''' import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } lowerCAmelCase : Optional[Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } lowerCAmelCase : List[Any] = { """jukebox""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_LYRIC_TOKENS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=["v3", "v2", "v2"] , snake_case__=512 , snake_case__=5 , snake_case__="<|endoftext|>" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token super().__init__( unk_token=snake_case__ , n_genres=snake_case__ , version=snake_case__ , max_n_lyric_tokens=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Tuple = version _lowerCAmelCase : Optional[int] = max_n_lyric_tokens _lowerCAmelCase : Tuple = n_genres with open(snake_case__ , encoding='utf-8' ) as vocab_handle: _lowerCAmelCase : List[str] = json.load(snake_case__ ) with open(snake_case__ , encoding='utf-8' ) as vocab_handle: _lowerCAmelCase : List[Any] = json.load(snake_case__ ) with open(snake_case__ , encoding='utf-8' ) as vocab_handle: _lowerCAmelCase : Optional[Any] = json.load(snake_case__ ) _lowerCAmelCase : Any = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: _lowerCAmelCase : Union[str, Any] = oov.replace(R'\-\'' , R'\-+\'' ) _lowerCAmelCase : Optional[int] = regex.compile(snake_case__ ) _lowerCAmelCase : Tuple = {v: k for k, v in self.artists_encoder.items()} _lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.genres_encoder.items()} _lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lyrics_encoder.items()} @property def a ( self ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def a ( self ): '''simple docstring''' return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = [self.artists_encoder.get(snake_case__ , 0 ) for artist in list_artists] for genres in range(len(snake_case__ ) ): _lowerCAmelCase : List[str] = [self.genres_encoder.get(snake_case__ , 0 ) for genre in list_genres[genres]] _lowerCAmelCase : Any = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) _lowerCAmelCase : List[str] = [[self.lyrics_encoder.get(snake_case__ , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def a ( self , snake_case__ ): '''simple docstring''' return list(snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.prepare_for_tokenization(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase : str = self._tokenize(snake_case__ ) return artist, genre, lyrics def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": _lowerCAmelCase : int = artists[idx].lower() _lowerCAmelCase : Tuple = [genres[idx].lower()] else: _lowerCAmelCase : Optional[int] = self._normalize(artists[idx] ) + '.v2' _lowerCAmelCase : str = [ self._normalize(snake_case__ ) + '.v2' for genre in genres[idx].split('_' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": _lowerCAmelCase : Tuple = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' ) _lowerCAmelCase : str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n' _lowerCAmelCase : int = {vocab[index]: index + 1 for index in range(len(snake_case__ ) )} _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Optional[Any] = len(snake_case__ ) + 1 _lowerCAmelCase : List[str] = self.vocab _lowerCAmelCase : Any = {v: k for k, v in self.vocab.items()} _lowerCAmelCase : List[Any] = '' else: _lowerCAmelCase : List[str] = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' ) _lowerCAmelCase : List[str] = self._run_strip_accents(snake_case__ ) _lowerCAmelCase : Optional[int] = lyrics.replace('\\' , '\n' ) _lowerCAmelCase : Tuple = self.out_of_vocab.sub('' , snake_case__ ), [], [] return artists, genres, lyrics def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[str] = unicodedata.normalize('NFD' , snake_case__ ) _lowerCAmelCase : Optional[Any] = [] for char in text: _lowerCAmelCase : Dict = unicodedata.category(snake_case__ ) if cat == "Mn": continue output.append(snake_case__ ) return "".join(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = ( [chr(snake_case__ ) for i in range(ord('a' ) , ord('z' ) + 1 )] + [chr(snake_case__ ) for i in 
range(ord('A' ) , ord('Z' ) + 1 )] + [chr(snake_case__ ) for i in range(ord('0' ) , ord('9' ) + 1 )] + ['.'] ) _lowerCAmelCase : List[str] = frozenset(snake_case__ ) _lowerCAmelCase : List[str] = re.compile(R'_+' ) _lowerCAmelCase : Optional[Any] = ''.join([c if c in accepted else '_' for c in text.lower()] ) _lowerCAmelCase : Optional[int] = pattern.sub('_' , snake_case__ ).strip('_' ) return text def a ( self , snake_case__ ): '''simple docstring''' return " ".join(snake_case__ ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Dict = TensorType(snake_case__ ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( 'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' ) import tensorflow as tf _lowerCAmelCase : List[str] = tf.constant _lowerCAmelCase : str = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' ) import torch _lowerCAmelCase : Union[str, Any] = torch.tensor _lowerCAmelCase : int = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' ) import jax.numpy as jnp # noqa: F811 _lowerCAmelCase : int = jnp.array _lowerCAmelCase : Optional[Any] = _is_jax else: _lowerCAmelCase : List[Any] = np.asarray _lowerCAmelCase : List[Any] = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: _lowerCAmelCase : Dict = [inputs] if not is_tensor(snake_case__ ): _lowerCAmelCase : Tuple = as_tensor(snake_case__ ) except: # noqa E722 raise ValueError( 'Unable to create tensor, you should probably activate truncation and/or padding ' 'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' 
) return inputs def __call__( self , snake_case__ , snake_case__ , snake_case__="" , snake_case__="pt" ): '''simple docstring''' _lowerCAmelCase : Dict = [0, 0, 0] _lowerCAmelCase : List[Any] = [artist] * len(self.version ) _lowerCAmelCase : Tuple = [genres] * len(self.version ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.tokenize(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = self._convert_token_to_id(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[Any] = [-INFINITY] * len(full_tokens[-1] ) _lowerCAmelCase : List[str] = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case__ ) for i in range(len(self.version ) ) ] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : Tuple = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case__ ) ) _lowerCAmelCase : Dict = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case__ ) ) _lowerCAmelCase : List[str] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case__ ) ) return (artists_file, genres_file, lyrics_file) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.artists_decoder.get(snake_case__ ) _lowerCAmelCase : Optional[int] = [self.genres_decoder.get(snake_case__ ) for genre in genres_index] _lowerCAmelCase : Any = [self.lyrics_decoder.get(snake_case__ ) for character in lyric_index] return artist, genres, lyrics
25
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowerCAmelCase : List[Any] = logging.get_logger(__name__)

lowerCAmelCase : Tuple = {
    """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = "nat"

    __magic_name__ = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(**snake_case__ )

        _lowerCAmelCase : Union[str, Any] = patch_size
        _lowerCAmelCase : List[str] = num_channels
        _lowerCAmelCase : Tuple = embed_dim
        _lowerCAmelCase : Any = depths
        _lowerCAmelCase : Dict = len(snake_case__ )
        _lowerCAmelCase : str = num_heads
        _lowerCAmelCase : Dict = kernel_size
        _lowerCAmelCase : Union[str, Any] = mlp_ratio
        _lowerCAmelCase : int = qkv_bias
        _lowerCAmelCase : Optional[Any] = hidden_dropout_prob
        _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
        _lowerCAmelCase : List[str] = drop_path_rate
        _lowerCAmelCase : Union[str, Any] = hidden_act
        _lowerCAmelCase : Tuple = layer_norm_eps
        _lowerCAmelCase : Dict = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
        _lowerCAmelCase : Any = layer_scale_init_value
        _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
        _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
            out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names
        )
25
1
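`self._run_strip_accents` in the Jukebox tokenizer above is the usual NFD-decompose-then-drop-combining-marks normalization. In isolation:

import unicodedata

def strip_accents(text: str) -> str:
    # Decompose, then drop combining marks (Unicode category "Mn").
    return "".join(
        char
        for char in unicodedata.normalize("NFD", text)
        if unicodedata.category(char) != "Mn"
    )

assert strip_accents("Beyoncé Mötley Crüe") == "Beyonce Motley Crue"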
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
lowerCAmelCase : int = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
lowerCAmelCase : Tuple = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : List[str] = []
    for i in range(len(_A ) ):
        _lowerCAmelCase : Optional[int] = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            _lowerCAmelCase : int = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(_A ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(_A ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(_A ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            _lowerCAmelCase : Tuple = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(_A )
    return next_generation


def lowercase (_A , _A ):
    """simple docstring"""
    _lowerCAmelCase : str = []
    for _ in range(_A ):
        # Create output image
        _lowerCAmelCase : Union[str, Any] = Image.new('RGB' , (len(cells[0] ), len(_A )) )
        _lowerCAmelCase : str = img.load()
        # Save cells to image
        for x in range(len(_A ) ):
            for y in range(len(cells[0] ) ):
                _lowerCAmelCase : str = 2_5_5 - cells[y][x] * 2_5_5
                _lowerCAmelCase : List[str] = (colour, colour, colour)
        # Save image
        images.append(_A )
        _lowerCAmelCase : Dict = new_generation(_A )
    return images


if __name__ == "__main__":
    lowerCAmelCase : Optional[Any] = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
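A compact reimplementation of the `new_generation` rule from the Game of Life script above, verifying that the blinker oscillates between a vertical and a horizontal bar:

def step(cells):
    rows, cols = len(cells), len(cells[0])
    nxt = []
    for i in range(rows):
        row = []
        for j in range(cols):
            # Count live neighbours in the 8 surrounding cells, clipped at the grid edge.
            count = sum(
                cells[i + di][j + dj]
                for di in (-1, 0, 1)
                for dj in (-1, 0, 1)
                if (di, dj) != (0, 0) and 0 <= i + di < rows and 0 <= j + dj < cols
            )
            alive = cells[i][j] == 1
            row.append(1 if (alive and 2 <= count <= 3) or (not alive and count == 3) else 0)
        nxt.append(row)
    return nxt

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert step(blinker) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert step(step(blinker)) == blinker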
'''simple docstring'''

import baseaa
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self , snake_case__ ):
        '''simple docstring'''
        if isinstance(snake_case__ , snake_case__ ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            _lowerCAmelCase : Dict = deepcopy(snake_case__ )
        elif os.path.exists(snake_case__ ):
            with io.open(snake_case__ , 'r' , encoding='utf-8' ) as f:
                _lowerCAmelCase : Any = json.load(snake_case__ )
        else:
            try:
                _lowerCAmelCase : Tuple = baseaa.urlsafe_baadecode(snake_case__ ).decode('utf-8' )
                _lowerCAmelCase : Tuple = json.loads(snake_case__ )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    F'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )

        _lowerCAmelCase : Any = config

        self.set_stage_and_offload()

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = self.get_value('zero_optimization.stage' , -1 )

        # offload
        _lowerCAmelCase : Tuple = False
        if self.is_zeroa() or self.is_zeroa():
            _lowerCAmelCase : Union[str, Any] = set(['cpu', 'nvme'] )
            _lowerCAmelCase : Optional[Any] = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device' ),
                    self.get_value('zero_optimization.offload_param.device' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                _lowerCAmelCase : Any = True

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Any = self.config

        # find the config node of interest if it exists
        _lowerCAmelCase : Optional[int] = ds_key_long.split('.' )
        _lowerCAmelCase : List[Any] = nodes.pop()
        for node in nodes:
            _lowerCAmelCase : str = config.get(snake_case__ )
            if config is None:
                return None, ds_key

        return config, ds_key

    def a ( self , snake_case__ , snake_case__=None ):
        '''simple docstring'''
        _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.find_config_node(snake_case__ )
        if config is None:
            return default
        return config.get(snake_case__ , snake_case__ )

    def a ( self , snake_case__ , snake_case__=False ):
        '''simple docstring'''
        _lowerCAmelCase : Any = self.config

        # find the config node of interest if it exists
        _lowerCAmelCase : int = ds_key_long.split('.' )
        for node in nodes:
            _lowerCAmelCase : List[Any] = config
            _lowerCAmelCase : Any = config.get(snake_case__ )
            if config is None:
                if must_exist:
                    raise ValueError(F'Can\'t find {ds_key_long} entry in the config: {self.config}' )
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(snake_case__ )

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = self.get_value(snake_case__ )
        return False if value is None else bool(snake_case__ )

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = self.get_value(snake_case__ )
        return False if value is None else not bool(snake_case__ )

    def a ( self ):
        '''simple docstring'''
        return self._stage == 2

    def a ( self ):
        '''simple docstring'''
        return self._stage == 3

    def a ( self ):
        '''simple docstring'''
        return self._offload


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = engine

    def a ( self , snake_case__ , **snake_case__ ):
        '''simple docstring'''
        self.engine.backward(snake_case__ , **snake_case__ )

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    def __init__( self , snake_case__ ):
        '''simple docstring'''
        super().__init__(snake_case__ , device_placement=snake_case__ , scaler=snake_case__ )
        _lowerCAmelCase : List[str] = hasattr(self.optimizer , 'overflow' )

    def a ( self , snake_case__=None ):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def a ( self ):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def a ( self ):
        '''simple docstring'''
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    def __init__( self , snake_case__ , snake_case__ ):
        '''simple docstring'''
        super().__init__(snake_case__ , snake_case__ )

    def a ( self ):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self , snake_case__ , snake_case__=0.001 , snake_case__=0 , **snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = params
        _lowerCAmelCase : Any = lr
        _lowerCAmelCase : Optional[int] = weight_decay
        _lowerCAmelCase : Any = kwargs


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self , snake_case__ , snake_case__=None , snake_case__=0 , **snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : int = optimizer
        _lowerCAmelCase : str = total_num_steps
        _lowerCAmelCase : Optional[Any] = warmup_num_steps
        _lowerCAmelCase : Dict = kwargs
25
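The dotted-key lookup that `get_value` and `find_config_node` implement above can be checked on a plain nested dict; the config below is illustrative, not a real DeepSpeed file.

def get_value(config, ds_key_long, default=None):
    # walk "a.b.c" one level at a time, as find_config_node does above
    nodes = ds_key_long.split(".")
    key = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(key, default)

cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(cfg, "zero_optimization.stage") == 3
assert get_value(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_value(cfg, "zero_optimization.missing", "n/a") == "n/a"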
'''simple docstring'''

lowerCAmelCase : Union[str, Any] = 0  # The first color of the flag.
lowerCAmelCase : Optional[int] = 1  # The second color of the flag.
lowerCAmelCase : int = 2  # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)


def lowercase (_A ):
    """simple docstring"""
    if not sequence:
        return []
    if len(_A ) == 1:
        return list(_A )
    _lowerCAmelCase : Optional[int] = 0
    _lowerCAmelCase : List[str] = len(_A ) - 1
    _lowerCAmelCase : Optional[Any] = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
            high -= 1
        else:
            _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contains only {colors} values'
            raise ValueError(_A )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
    lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
    print(F'''{dutch_national_flag_sort(unsorted)}''')
25
1
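A quick check of the three-pointer partition above, restated as a small standalone function (colors hard-coded to 0/1/2) so it can run outside the dataset row.

def flag_sort(seq):
    # low/mid/high invariant: everything left of low is 0, right of high is 2
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:
            mid += 1
        else:
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]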
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : str = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = SpeechTaTokenizer __magic_name__ = False __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : Optional[Any] = SpeechTaTokenizer(snake_case__ ) _lowerCAmelCase : Tuple = AddedToken('<mask>' , lstrip=snake_case__ , rstrip=snake_case__ ) _lowerCAmelCase : Tuple = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[str] = 'this is a test' _lowerCAmelCase : Dict = 'this is a test' return input_text, output_text def a ( self , snake_case__ , snake_case__=False , snake_case__=20 , snake_case__=5 ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : List[str] = self.get_input_output_texts(snake_case__ ) _lowerCAmelCase : Optional[Any] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : int = tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ ) return text, ids def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = '<pad>' _lowerCAmelCase : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-4] , 'œ' ) self.assertEqual(vocab_keys[-2] , '<mask>' ) self.assertEqual(vocab_keys[-1] , '<ctc_blank>' ) self.assertEqual(len(snake_case__ ) , 81 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=snake_case__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _lowerCAmelCase : int = tokenizer.vocab_size _lowerCAmelCase : Dict = len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _lowerCAmelCase : List[Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd'] _lowerCAmelCase : Union[str, Any] = tokenizer.add_tokens(snake_case__ ) _lowerCAmelCase : Any = tokenizer.vocab_size _lowerCAmelCase : Optional[int] = len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , len(snake_case__ ) ) self.assertEqual(snake_case__ , all_size + len(snake_case__ ) ) _lowerCAmelCase : Tuple = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=snake_case__ ) 
self.assertGreaterEqual(len(snake_case__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) _lowerCAmelCase : Union[str, Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} _lowerCAmelCase : Optional[Any] = tokenizer.add_special_tokens(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer.vocab_size _lowerCAmelCase : Union[str, Any] = len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , len(snake_case__ ) ) self.assertEqual(snake_case__ , all_size_a + len(snake_case__ ) ) _lowerCAmelCase : List[Any] = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=snake_case__ ) self.assertGreaterEqual(len(snake_case__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.get_tokenizer() _lowerCAmelCase : List[Any] = tokenizer.tokenize('This is a test' ) # fmt: off self.assertListEqual(snake_case__ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) _lowerCAmelCase : str = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) _lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(snake_case__ ) # fmt: off self.assertListEqual(snake_case__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on _lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = [ 'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ' 'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ' 'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ' 'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.', 'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ' 'conditioning on both left and right context in all layers.', 'The quick brown fox jumps over the lazy dog.', ] # fmt: off _lowerCAmelCase : Dict = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=snake_case__ , )
25
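One invariant the added-token assertions above rely on, shown on a toy vocabulary: ids for newly added tokens are appended after the base vocabulary, so each is greater than vocab_size - 1.

base_vocab = {"<s>": 0, "<pad>": 1, "a": 2}
vocab = dict(base_vocab)
for tok in ["aaaaa bbbbbb", "cccccccccdddddddd"]:
    vocab[tok] = len(vocab)  # new ids start at the old vocabulary size
assert all(vocab[t] > len(base_vocab) - 1 for t in ["aaaaa bbbbbb", "cccccccccdddddddd"])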
'''simple docstring'''


def lowercase ():
    """simple docstring"""
    _lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    _lowerCAmelCase : int = 6
    _lowerCAmelCase : Dict = 1
    _lowerCAmelCase : Optional[int] = 1_9_0_1
    _lowerCAmelCase : Optional[Any] = 0

    while year < 2_0_0_1:
        day += 7

        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                _lowerCAmelCase : List[str] = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                _lowerCAmelCase : List[str] = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                _lowerCAmelCase : List[str] = day - days_per_month[month - 2]

        if month > 1_2:
            year += 1
            _lowerCAmelCase : Optional[int] = 1

        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
25
1
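The calendar walk above can be cross-checked with the standard library: counting first-of-month Sundays over 1901-2000 gives 171.

from datetime import date

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6  # Monday=0 ... Sunday=6
)
assert count == 171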
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase : List[Any] = logging.get_logger(__name__)

lowerCAmelCase : Dict = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = "yolos"

    def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=[512, 864] , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=100 , snake_case__=True , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(**snake_case__ )

        _lowerCAmelCase : Union[str, Any] = hidden_size
        _lowerCAmelCase : Optional[Any] = num_hidden_layers
        _lowerCAmelCase : List[Any] = num_attention_heads
        _lowerCAmelCase : Tuple = intermediate_size
        _lowerCAmelCase : Tuple = hidden_act
        _lowerCAmelCase : int = hidden_dropout_prob
        _lowerCAmelCase : List[Any] = attention_probs_dropout_prob
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : Optional[Any] = layer_norm_eps
        _lowerCAmelCase : int = image_size
        _lowerCAmelCase : List[str] = patch_size
        _lowerCAmelCase : Any = num_channels
        _lowerCAmelCase : Union[str, Any] = qkv_bias
        _lowerCAmelCase : Union[str, Any] = num_detection_tokens
        _lowerCAmelCase : List[str] = use_mid_position_embeddings
        _lowerCAmelCase : Dict = auxiliary_loss
        # Hungarian matcher
        _lowerCAmelCase : int = class_cost
        _lowerCAmelCase : List[str] = bbox_cost
        _lowerCAmelCase : List[Any] = giou_cost
        # Loss coefficients
        _lowerCAmelCase : Union[str, Any] = bbox_loss_coefficient
        _lowerCAmelCase : Union[str, Any] = giou_loss_coefficient
        _lowerCAmelCase : Optional[int] = eos_coefficient


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = version.parse("1.11" )

    @property
    def a ( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def a ( self ):
        '''simple docstring'''
        return 1E-4

    @property
    def a ( self ):
        '''simple docstring'''
        return 12
25
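A back-of-envelope check of the sequence length implied by the YOLOS defaults above, assuming the usual ViT patching scheme (one token per 16x16 patch, plus one [CLS] token and the 100 detection tokens).

image_size, patch_size, num_detection_tokens = [512, 864], 16, 100
num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
assert num_patches == 1728
assert num_patches + 1 + num_detection_tokens == 1829  # full token sequence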
'''simple docstring'''


def lowercase (_A = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    _lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
    primes.add(2 )
    for p in range(3 , _A , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , _A , _A ) ) )

    _lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]

    for p in primes:
        for n in range(_A , limit + 1 , _A ):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:] ) )


if __name__ == "__main__":
    print(F'''{solution() = }''')
25
1
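A small-scale check of the totient-sieve idea above: summing phi(d) for d from 2 to 8 counts the 21 reduced proper fractions with denominator at most 8.

def phi_sum(limit):
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # untouched so far, hence prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])

assert phi_sum(8) == 21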
'''simple docstring'''

import colorsys

from PIL import Image  # type: ignore


def lowercase (_A , _A , _A ):
    """simple docstring"""
    _lowerCAmelCase : List[str] = x
    _lowerCAmelCase : Tuple = y
    for step in range(_A ):  # noqa: B007
        _lowerCAmelCase : Tuple = a * a - b * b + x
        _lowerCAmelCase : List[str] = 2 * a * b + y
        _lowerCAmelCase : List[Any] = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def lowercase (_A ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)


def lowercase (_A ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )


def lowercase (_A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ):
    """simple docstring"""
    _lowerCAmelCase : Union[str, Any] = Image.new('RGB' , (image_width, image_height) )
    _lowerCAmelCase : Dict = img.load()

    # loop through the image-coordinates
    for image_x in range(_A ):
        for image_y in range(_A ):
            # determine the figure-coordinates based on the image-coordinates
            _lowerCAmelCase : Union[str, Any] = figure_width / image_width * image_height
            _lowerCAmelCase : Any = figure_center_x + (image_x / image_width - 0.5) * figure_width
            _lowerCAmelCase : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height

            _lowerCAmelCase : Any = get_distance(_A , _A , _A )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                _lowerCAmelCase : Union[str, Any] = get_color_coded_rgb(_A )
            else:
                _lowerCAmelCase : Optional[int] = get_black_and_white_rgb(_A )

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    lowerCAmelCase : Any = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
25
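Two sanity checks of the escape-time distance from the Mandelbrot file above, restated as a standalone function: the origin never diverges (distance 1.0), while a point outside the set escapes almost immediately.

def get_distance(x, y, max_step):
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 means guaranteed divergence
            break
    return step / (max_step - 1)

assert get_distance(0, 0, 50) == 1.0
assert get_distance(1, 0, 50) < 0.1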
'''simple docstring'''

import argparse
import os
import re


lowerCAmelCase : Tuple = """src/transformers"""

# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : int = _re_indent.search(_A )
    return "" if search is None else search.groups()[0]


def lowercase (_A , _A="" , _A=None , _A=None ):
    """simple docstring"""
    _lowerCAmelCase : int = 0
    _lowerCAmelCase : Dict = code.split('\n' )
    if start_prompt is not None:
        while not lines[index].startswith(_A ):
            index += 1
        _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
    else:
        _lowerCAmelCase : str = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    _lowerCAmelCase : List[Any] = [lines[index]]
    index += 1
    while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(_A ) )
                if index < len(_A ) - 1:
                    _lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
                    index += 1
                else:
                    _lowerCAmelCase : Union[str, Any] = []
            else:
                blocks.append('\n'.join(_A ) )
                _lowerCAmelCase : List[str] = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1

    # Adds current block if it's nonempty.
    if len(_A ) > 0:
        blocks.append('\n'.join(_A ) )

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(_A ):
        blocks.append('\n'.join(lines[index:] ) )

    return blocks


def lowercase (_A ):
    """simple docstring"""

    def _inner(_A ):
        return key(_A ).lower().replace('_' , '' )

    return _inner


def lowercase (_A , _A=None ):
    """simple docstring"""

    def noop(_A ):
        return _A

    if key is None:
        _lowerCAmelCase : List[Any] = noop
    # Constants are all uppercase, they go first.
    _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
    # Functions begin with a lowercase, they go last.
    _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]

    _lowerCAmelCase : Dict = ignore_underscore(_A )
    return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )


def lowercase (_A ):
    """simple docstring"""

    def _replace(_A ):
        _lowerCAmelCase : Dict = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            _lowerCAmelCase : int = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"

    _lowerCAmelCase : Tuple = import_statement.split('\n' )
    if len(_A ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
        _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] )
        _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(_A ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
        else:
            _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                _lowerCAmelCase : List[str] = keys[:-1]
            _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
        return "\n".join(_A )
    else:
        # Finally we have to deal with imports fitting on one line
        _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
        return import_statement


def lowercase (_A , _A=True ):
    """simple docstring"""
    with open(_A , encoding='utf-8' ) as f:
        _lowerCAmelCase : Any = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    _lowerCAmelCase : Tuple = split_code_in_indented_blocks(
        _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(_A ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        _lowerCAmelCase : Tuple = main_blocks[block_idx]
        _lowerCAmelCase : int = block.split('\n' )

        # Get to the start of the imports.
        _lowerCAmelCase : Tuple = 0
        while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                _lowerCAmelCase : Dict = len(_A )
            else:
                line_idx += 1
        if line_idx >= len(_A ):
            continue

        # Ignore beginning and last line: they don't contain anything.
        _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
        _lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
        # We have two categories of import key: list or _import_structure[key].append/extend
        _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
        _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        _lowerCAmelCase : int = 0
        _lowerCAmelCase : Optional[Any] = []
        for i in range(len(_A ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(_A )
                count += 1

        # And we put our main block back together with its first and last line.
        _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )

    if code != "\n".join(_A ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(_A , 'w' , encoding='utf-8' ) as f:
                f.write('\n'.join(_A ) )


def lowercase (_A=True ):
    """simple docstring"""
    _lowerCAmelCase : int = []
    for root, _, files in os.walk(_A ):
        if "__init__.py" in files:
            _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
            if result:
                _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
    if len(_A ) > 0:
        raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )


if __name__ == "__main__":
    lowerCAmelCase : List[Any] = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    lowerCAmelCase : List[str] = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
25
1
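The ordering rule `sort_objects` implements above, demonstrated on a toy list (the underscore-insensitive tie-breaking key is omitted here for brevity): uppercase constants first, CamelCase classes second, lowercase functions last.

objs = ["load_file", "MY_CONST", "Parser", "A_FLAG", "helper", "Base"]
constants = sorted(o for o in objs if o.isupper())
classes = sorted(o for o in objs if o[0].isupper() and not o.isupper())
functions = sorted(o for o in objs if not o[0].isupper())
assert constants + classes + functions == [
    "A_FLAG", "MY_CONST", "Base", "Parser", "helper", "load_file"
]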
'''simple docstring'''

import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """simple docstring"""

    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    @property
    def a ( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = ort.SessionOptions()
        _lowerCAmelCase : Any = False
        return options

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        _lowerCAmelCase : Optional[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        _lowerCAmelCase : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting' ,
            revision='onnx' ,
            safety_checker=snake_case__ ,
            feature_extractor=snake_case__ ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=snake_case__ )

        _lowerCAmelCase : List[Any] = 'A red cat sitting on a park bench'

        _lowerCAmelCase : Dict = np.random.RandomState(0 )
        _lowerCAmelCase : Any = pipe(
            prompt=snake_case__ ,
            image=snake_case__ ,
            mask_image=snake_case__ ,
            guidance_scale=7.5 ,
            num_inference_steps=10 ,
            generator=snake_case__ ,
            output_type='np' ,
        )
        _lowerCAmelCase : Union[str, Any] = output.images
        _lowerCAmelCase : Any = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        _lowerCAmelCase : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        _lowerCAmelCase : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        _lowerCAmelCase : List[str] = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
        _lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting' ,
            revision='onnx' ,
            scheduler=snake_case__ ,
            safety_checker=snake_case__ ,
            feature_extractor=snake_case__ ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=snake_case__ )

        _lowerCAmelCase : Dict = 'A red cat sitting on a park bench'

        _lowerCAmelCase : Union[str, Any] = np.random.RandomState(0 )
        _lowerCAmelCase : Optional[int] = pipe(
            prompt=snake_case__ ,
            image=snake_case__ ,
            mask_image=snake_case__ ,
            guidance_scale=7.5 ,
            num_inference_steps=20 ,
            generator=snake_case__ ,
            output_type='np' ,
        )
        _lowerCAmelCase : Union[str, Any] = output.images
        _lowerCAmelCase : Optional[int] = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        _lowerCAmelCase : Optional[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
25
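The tolerance check the pipeline tests above rely on, in isolation: a small slice of the generated image is compared element-wise against hard-coded reference values (slices shortened here for illustration).

import numpy as np

image_slice = np.array([0.2514, 0.3007, 0.3517])
expected_slice = np.array([0.2514, 0.3007, 0.3518])
assert np.abs(image_slice - expected_slice).max() < 1e-3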
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
'''simple docstring'''

lowerCAmelCase : int = 2_56
# Modulus to hash a string
lowerCAmelCase : Tuple = 1_00_00_03


def lowercase (_A , _A ):
    """simple docstring"""
    _lowerCAmelCase : int = len(_A )
    _lowerCAmelCase : Union[str, Any] = len(_A )
    if p_len > t_len:
        return False

    _lowerCAmelCase : int = 0
    _lowerCAmelCase : Optional[Any] = 0
    _lowerCAmelCase : int = 1

    # Calculating the hash of pattern and substring of text
    for i in range(_A ):
        _lowerCAmelCase : int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        _lowerCAmelCase : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        _lowerCAmelCase : List[Any] = (modulus_power * alphabet_size) % modulus

    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        _lowerCAmelCase : Dict = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False


def lowercase ():
    """simple docstring"""
    _lowerCAmelCase : str = 'abc1abc12'
    _lowerCAmelCase : Union[str, Any] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    _lowerCAmelCase : Union[str, Any] = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(_A , _A ) and not rabin_karp(_A , _A )

    # Test 2)
    _lowerCAmelCase : Union[str, Any] = 'ABABX'
    _lowerCAmelCase : Any = 'ABABZABABYABABX'
    assert rabin_karp(_A , _A )

    # Test 3)
    _lowerCAmelCase : List[Any] = 'AAAB'
    _lowerCAmelCase : int = 'ABAAAAAB'
    assert rabin_karp(_A , _A )

    # Test 4)
    _lowerCAmelCase : Any = 'abcdabcy'
    _lowerCAmelCase : str = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(_A , _A )

    # Test 5)
    _lowerCAmelCase : Any = 'Lü'
    _lowerCAmelCase : List[str] = 'Lüsai'
    assert rabin_karp(_A , _A )
    _lowerCAmelCase : int = 'Lue'
    assert not rabin_karp(_A , _A )

    print('Success.' )


if __name__ == "__main__":
    test_rabin_karp()
25
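The rolling-hash update in the Rabin-Karp file above satisfies a simple identity worth checking directly: dropping the leading character and appending the next one must equal hashing the shifted window from scratch.

ALPHA, MOD = 256, 1_000_003

def full_hash(s):
    h = 0
    for ch in s:
        h = (ord(ch) + h * ALPHA) % MOD
    return h

text, p_len = "abcd", 3
power = pow(ALPHA, p_len - 1, MOD)  # same value the setup loop above accumulates
h = full_hash(text[:p_len])
h = ((h - ord(text[0]) * power) * ALPHA + ord(text[p_len])) % MOD
assert h == full_hash(text[1 : 1 + p_len])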
'''simple docstring'''

from __future__ import annotations

from typing import Any


def lowercase (_A ):
    """simple docstring"""
    if not postfix_notation:
        return 0

    _lowerCAmelCase : int = {'+', '-', '*', '/'}
    _lowerCAmelCase : list[Any] = []

    for token in postfix_notation:
        if token in operations:
            _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(_A ) )

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
1
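A compact reference evaluator for the same postfix scheme, with the operand order spelled out (the left operand is the second pop) and the same truncate-toward-zero division, useful for sanity-checking results.

def eval_postfix(tokens):
    stack = []
    for tok in tokens:
        if tok in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()  # a is the left operand
            if tok == "+":
                stack.append(a + b)
            elif tok == "-":
                stack.append(a - b)
            elif tok == "*":
                stack.append(a * b)
            elif a * b < 0 and a % b != 0:
                stack.append(a // b + 1)  # truncate toward zero
            else:
                stack.append(a // b)
        else:
            stack.append(int(tok))
    return stack.pop()

assert eval_postfix(["2", "1", "+", "3", "*"]) == 9
assert eval_postfix(["5", "3", "-"]) == 2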
'''simple docstring'''

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class UpperCamelCase__ ( datasets.BeamBasedBuilder ):
    """simple docstring"""

    def a ( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) ,
            supervised_keys=snake_case__ ,
        )

    def a ( self , snake_case__ , snake_case__ ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]

    def a ( self , snake_case__ , snake_case__ ):
        '''simple docstring'''
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(snake_case__ )


class UpperCamelCase__ ( datasets.BeamBasedBuilder ):
    """simple docstring"""

    def a ( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) ,
            supervised_keys=snake_case__ ,
        )

    def a ( self , snake_case__ , snake_case__ ):
        '''simple docstring'''
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]

    def a ( self , snake_case__ , snake_case__ ):
        '''simple docstring'''
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(snake_case__ )


def lowercase ():
    """simple docstring"""
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]


def lowercase ():
    """simple docstring"""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    @require_beam
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : int = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _lowerCAmelCase : Dict = DummyBeamDataset(cache_dir=snake_case__ , beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(snake_case__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            _lowerCAmelCase : Tuple = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , snake_case__ )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , snake_case__ )
            self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(snake_case__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset

    @require_beam
    def a ( self ):
        '''simple docstring'''
        import apache_beam as beam

        _lowerCAmelCase : List[str] = beam.io.parquetio.WriteToParquet

        _lowerCAmelCase : Optional[Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _lowerCAmelCase : Optional[int] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner='DirectRunner' )
            with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
                _lowerCAmelCase : Any = partial(snake_case__ , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        snake_case__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        snake_case__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00001-of-00002.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            _lowerCAmelCase : Any = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , snake_case__ )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , snake_case__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
            self.assertTrue(
                os.path.exists(os.path.join(snake_case__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset

    @require_beam
    def a ( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _lowerCAmelCase : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            _lowerCAmelCase : Optional[int] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(snake_case__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
            _lowerCAmelCase : Dict = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , snake_case__ )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , snake_case__ )
            self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(snake_case__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset
25
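The shard naming convention the parquet test above asserts on, in isolation (the builder name below is illustrative):

name, num_shards = "dummy_beam_dataset", 2
shards = [f"{name}-train-{i:05d}-of-{num_shards:05d}.arrow" for i in range(num_shards)]
assert shards == [
    "dummy_beam_dataset-train-00000-of-00002.arrow",
    "dummy_beam_dataset-train-00001-of-00002.arrow",
]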
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase : int = logging.get_logger(__name__)

lowerCAmelCase : Union[str, Any] = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = "mobilenet_v2"

    def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(**snake_case__ )

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )

        _lowerCAmelCase : List[str] = num_channels
        _lowerCAmelCase : Union[str, Any] = image_size
        _lowerCAmelCase : List[Any] = depth_multiplier
        _lowerCAmelCase : List[Any] = depth_divisible_by
        _lowerCAmelCase : Optional[Any] = min_depth
        _lowerCAmelCase : str = expand_ratio
        _lowerCAmelCase : str = output_stride
        _lowerCAmelCase : Any = first_layer_is_expansion
        _lowerCAmelCase : int = finegrained_output
        _lowerCAmelCase : str = hidden_act
        _lowerCAmelCase : List[str] = tf_padding
        _lowerCAmelCase : Optional[int] = classifier_dropout_prob
        _lowerCAmelCase : int = initializer_range
        _lowerCAmelCase : Optional[int] = layer_norm_eps
        _lowerCAmelCase : str = semantic_loss_ignore_index


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = version.parse("1.11" )

    @property
    def a ( self ):
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def a ( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def a ( self ):
        '''simple docstring'''
        return 1E-4
25
1
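A sketch of the channel-rounding rule implied by `depth_multiplier`, `depth_divisible_by` and `min_depth` in the config above; this is the commonly used MobileNet `make_divisible` formulation, not code taken from this file.

def make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value

assert make_divisible(32 * 1.4) == 48   # 44.8 channels round up to 48
assert make_divisible(32 * 0.35) == 16  # 11.2 -> 8, then bumped back up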
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase (_A , _A , _A , _A , _A ): """simple docstring""" with open(_A ) as metadata_file: _lowerCAmelCase : List[Any] = json.load(_A ) _lowerCAmelCase : int = LukeConfig(use_entity_aware_attention=_A , **metadata['model_config'] ) # Load in the weights from the checkpoint_path _lowerCAmelCase : Tuple = torch.load(_A , map_location='cpu' ) # Load the entity vocab file _lowerCAmelCase : Any = load_entity_vocab(_A ) _lowerCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks _lowerCAmelCase : Dict = AddedToken('<ent>' , lstrip=_A , rstrip=_A ) _lowerCAmelCase : Optional[Any] = AddedToken('<ent2>' , lstrip=_A , rstrip=_A ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_A ) with open(os.path.join(_A , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(_A , _A ) _lowerCAmelCase : str = LukeTokenizer.from_pretrained(_A ) # Initialize the embeddings of the special tokens _lowerCAmelCase : int = state_dict['embeddings.word_embeddings.weight'] _lowerCAmelCase : Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) _lowerCAmelCase : List[str] = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) _lowerCAmelCase : str = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _lowerCAmelCase : Optional[int] = f'encoder.layer.{layer_index}.attention.self.' _lowerCAmelCase : Tuple = state_dict[prefix + matrix_name] _lowerCAmelCase : int = state_dict[prefix + matrix_name] _lowerCAmelCase : Tuple = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _lowerCAmelCase : Any = state_dict['entity_embeddings.entity_embeddings.weight'] _lowerCAmelCase : Any = entity_emb[entity_vocab['[MASK]']] _lowerCAmelCase : Optional[Any] = LukeModel(config=_A ).eval() _lowerCAmelCase , _lowerCAmelCase : Dict = model.load_state_dict(_A , strict=_A ) if not (len(_A ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'Missing keys {", ".join(_A )}. Expected only missing embeddings.position_ids' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs _lowerCAmelCase : Dict = LukeTokenizer.from_pretrained(_A , task='entity_classification' ) _lowerCAmelCase : Dict = ( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' 
) _lowerCAmelCase : Dict = (3_9, 4_2) _lowerCAmelCase : List[str] = tokenizer(_A , entity_spans=[span] , add_prefix_space=_A , return_tensors='pt' ) _lowerCAmelCase : List[Any] = model(**_A ) # Verify word hidden states if model_size == "large": _lowerCAmelCase : Union[str, Any] = torch.Size((1, 4_2, 1_0_2_4) ) _lowerCAmelCase : Dict = torch.tensor( [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] ) else: # base _lowerCAmelCase : List[Any] = torch.Size((1, 4_2, 7_6_8) ) _lowerCAmelCase : Optional[Any] = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _lowerCAmelCase : str = torch.Size((1, 1, 1_0_2_4) ) _lowerCAmelCase : List[str] = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] ) else: # base _lowerCAmelCase : str = torch.Size((1, 1, 7_6_8) ) _lowerCAmelCase : Dict = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _A , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(_A ) ) model.save_pretrained(_A ) def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Any = {} with open(_A , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_A ): _lowerCAmelCase , _lowerCAmelCase : Tuple = line.rstrip().split('\t' ) _lowerCAmelCase : str = index return entity_vocab if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""") parser.add_argument( """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration.""" ) parser.add_argument( """--entity_vocab_path""", default=None, type=str, help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model.""" ) parser.add_argument( """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted.""" ) lowerCAmelCase : Optional[int] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
25
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
'''simple docstring''' def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ): raise TypeError('Input value must be an \'int\' type' ) _lowerCAmelCase : int = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
25
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
25
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = tempfile.mkdtemp() _lowerCAmelCase : int = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) _lowerCAmelCase : int = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _lowerCAmelCase : Dict = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case__ , snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _lowerCAmelCase : Any = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.get_tokenizer() _lowerCAmelCase : int = self.get_rust_tokenizer() _lowerCAmelCase : int = self.get_image_processor() _lowerCAmelCase : List[Any] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) _lowerCAmelCase : Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ ) _lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_fast.save_pretrained(self.tmpdirname ) _lowerCAmelCase : int = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case__ ) self.assertIsInstance(processor_fast.tokenizer , snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case__ ) 
self.assertIsInstance(processor_fast.image_processor , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) _lowerCAmelCase : Any = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.get_image_processor() _lowerCAmelCase : Optional[Any] = self.get_tokenizer() _lowerCAmelCase : List[str] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : List[Any] = self.prepare_image_inputs() _lowerCAmelCase : Tuple = image_processor(snake_case__ , return_tensors='np' ) _lowerCAmelCase : Optional[Any] = processor(images=snake_case__ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.get_image_processor() _lowerCAmelCase : Optional[int] = self.get_tokenizer() _lowerCAmelCase : Optional[Any] = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Optional[int] = 'lower newer' _lowerCAmelCase : Any = processor(text=snake_case__ ) _lowerCAmelCase : List[str] = tokenizer(snake_case__ , padding='max_length' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.get_image_processor() _lowerCAmelCase : Any = self.get_tokenizer() _lowerCAmelCase : Tuple = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Tuple = 'lower newer' _lowerCAmelCase : int = self.prepare_image_inputs() _lowerCAmelCase : Dict = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.get_image_processor() _lowerCAmelCase : Dict = self.get_tokenizer() _lowerCAmelCase : Tuple = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase : Dict = processor.batch_decode(snake_case__ ) _lowerCAmelCase : List[Any] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.get_image_processor() _lowerCAmelCase : Any = self.get_tokenizer() _lowerCAmelCase : Tuple = AlignProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) _lowerCAmelCase : Tuple = 'lower newer' _lowerCAmelCase : int = self.prepare_image_inputs() _lowerCAmelCase : str = 
processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
25
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowerCAmelCase : List[str] = [ """kernels/rwkv/wkv_cuda.cu""", """kernels/rwkv/wkv_op.cpp""", """kernels/deformable_detr/ms_deform_attn.h""", """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""", """models/graphormer/algos_graphormer.pyx""", ] def lowercase (_A ): """simple docstring""" for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""") lowerCAmelCase : Dict = parser.parse_args() if args.check_lib: lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""") lowerCAmelCase : int = Path(transformers_module.__file__).parent else: lowerCAmelCase : int = Path.cwd() / """build/lib/transformers""" if not test_custom_files_are_present(transformers_path): raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
25
1
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase : Dict = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase : Any = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCAmelCase : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING lowerCAmelCase : Optional[int] = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure 
SPECIAL_CASES_TO_ALLOW.update( { """CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def lowercase (_A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : Optional[int] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f'config.{attribute}' in modeling_source or f'getattr(config, "{attribute}"' in modeling_source or f'getattr(self.config, "{attribute}"' in modeling_source ): _lowerCAmelCase : Tuple = True # Deal with multi-line cases elif ( re.search( rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , _A , ) is not None ): _lowerCAmelCase : Union[str, Any] = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: _lowerCAmelCase : Union[str, Any] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files _lowerCAmelCase : int = [ 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_size', 'use_cache', 'out_features', 'out_indices', ] _lowerCAmelCase : int = ['encoder_no_repeat_ngram_size'] # Special cases to be allowed _lowerCAmelCase : Dict = True if not attribute_used: _lowerCAmelCase : Any = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: _lowerCAmelCase : Union[str, Any] = True elif attribute in ["tie_word_embeddings"] and default_value is False: _lowerCAmelCase : str = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: _lowerCAmelCase : Union[str, Any] = True elif attribute.endswith('_token_id' ): _lowerCAmelCase : List[Any] = True # configuration class specific cases if not case_allowed: _lowerCAmelCase : str = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) _lowerCAmelCase : int = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Tuple = dict(inspect.signature(config_class.__init__ ).parameters ) _lowerCAmelCase : Optional[Any] = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']] _lowerCAmelCase : Tuple = [signature[param].default for param in parameter_names] 
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass _lowerCAmelCase : str = {} if len(config_class.attribute_map ) > 0: _lowerCAmelCase : List[Any] = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files _lowerCAmelCase : Optional[Any] = inspect.getsourcefile(_A ) _lowerCAmelCase : int = os.path.dirname(_A ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. _lowerCAmelCase : str = [os.path.join(_A , _A ) for fn in os.listdir(_A ) if fn.startswith('modeling_' )] # Get the source code strings _lowerCAmelCase : List[Any] = [] for path in modeling_paths: if os.path.isfile(_A ): with open(_A ) as fp: modeling_sources.append(fp.read() ) _lowerCAmelCase : str = [] for config_param, default_value in zip(_A , _A ): # `attributes` here is all the variant names for `config_param` _lowerCAmelCase : List[str] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_A , _A , _A , _A ): unused_attributes.append(attributes[0] ) return sorted(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : List[Any] = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) _lowerCAmelCase : Optional[Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _A : inspect.isclass(_A ) and issubclass(_A , _A ) and inspect.getmodule(_A ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: _lowerCAmelCase : int = check_config_attributes_being_used(_A ) if len(_A ) > 0: _lowerCAmelCase : Optional[int] = unused_attributes if len(_A ) > 0: _lowerCAmelCase : Any = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n' for name, attributes in configs_with_unused_attributes.items(): error += f'{name}: {attributes}\n' raise ValueError(_A ) if __name__ == "__main__": check_config_attributes()
25
'''simple docstring''' def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = 0 # if input_string is "aba" than new_input_string become "a|b|a" _lowerCAmelCase : List[str] = '' _lowerCAmelCase : Any = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(_A ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0 # length[i] shows the length of palindromic substring with center i _lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )] # for each character in new_string find corresponding palindromic string _lowerCAmelCase : Any = 0 for j in range(len(_A ) ): _lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(_A ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _lowerCAmelCase : List[str] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: _lowerCAmelCase : Optional[Any] = j - k + 1 # noqa: E741 _lowerCAmelCase : int = j + k - 1 # update max_length and start position if max_length < length[j]: _lowerCAmelCase : Dict = length[j] _lowerCAmelCase : Optional[int] = j # create that string _lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
25
1
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py lowerCAmelCase : Dict = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ lowerCAmelCase : str = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ lowerCAmelCase : Optional[int] = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def a ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def a ( self , snake_case__ , snake_case__ , snake_case__=4 , snake_case__=False ): '''simple docstring''' _lowerCAmelCase : Any = compute_bleu( reference_corpus=snake_case__ , translation_corpus=snake_case__ , max_order=snake_case__ , smooth=snake_case__ ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : int = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
1
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase : Optional[Any] = """platform""" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowercase (_A , _A , _A=None , _A=None , _A=None , _A=None , _A=None , _A=None , ): """simple docstring""" if attention_mask is None: _lowerCAmelCase : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowerCAmelCase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowerCAmelCase : int = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCAmelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCAmelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=32 , snake_case__=2 , snake_case__=1 , snake_case__=0 , snake_case__=0.02 , ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : Union[str, Any] = seq_length _lowerCAmelCase : Optional[Any] = is_training _lowerCAmelCase : Union[str, Any] = use_labels _lowerCAmelCase : Tuple = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : Optional[int] = num_hidden_layers _lowerCAmelCase : List[Any] = num_attention_heads _lowerCAmelCase : Optional[int] = intermediate_size _lowerCAmelCase : Any = hidden_act _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : Optional[Any] = max_position_embeddings _lowerCAmelCase : str = eos_token_id _lowerCAmelCase : List[Any] = pad_token_id _lowerCAmelCase : str = bos_token_id _lowerCAmelCase : str = initializer_range def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowerCAmelCase : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowerCAmelCase : Optional[Any] = shift_tokens_right(snake_case__ , 1 , 2 ) _lowerCAmelCase : Dict = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , 
encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=snake_case__ , ) _lowerCAmelCase : List[Any] = prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = 20 _lowerCAmelCase : Dict = model_class_name(snake_case__ ) _lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowerCAmelCase : List[str] = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ ) _lowerCAmelCase : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _lowerCAmelCase : Union[str, Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCAmelCase : Any = model.decode( decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , ) _lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowerCAmelCase : str = model.decode( decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , ) _lowerCAmelCase : Optional[int] = model.decode(snake_case__ , snake_case__ ) _lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = 20 _lowerCAmelCase : List[Any] = model_class_name(snake_case__ ) _lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] ) _lowerCAmelCase , _lowerCAmelCase : List[str] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowerCAmelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ ) _lowerCAmelCase : Union[str, Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , ) _lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowerCAmelCase : int = model.decode( decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , 
decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , ) _lowerCAmelCase : Tuple = model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ ) _lowerCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = 9_9 def a ( self ): '''simple docstring''' _lowerCAmelCase : int = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _lowerCAmelCase : Optional[Any] = input_ids.shape[0] _lowerCAmelCase : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = self._get_config_and_data() _lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(snake_case__ ) _lowerCAmelCase : Union[str, Any] = lm_model(input_ids=snake_case__ ) _lowerCAmelCase : Any = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _lowerCAmelCase : List[Any] = FlaxBlenderbotForConditionalGeneration(snake_case__ ) _lowerCAmelCase : Dict = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _lowerCAmelCase : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _lowerCAmelCase : Tuple = lm_model(input_ids=snake_case__ , decoder_input_ids=snake_case__ ) _lowerCAmelCase : Any = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _lowerCAmelCase : Optional[int] = shift_tokens_right(snake_case__ , 1 , 2 ) _lowerCAmelCase : Optional[int] = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum() _lowerCAmelCase : Any = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(snake_case__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = True __magic_name__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __magic_name__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = FlaxBlenderbotModelTester(self ) def a ( self 
): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : Union[str, Any] = model_class(snake_case__ ) @jax.jit def encode_jitted(snake_case__ , snake_case__=None , **snake_case__ ): return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ ) with self.subTest('JIT Enabled' ): _lowerCAmelCase : Union[str, Any] = encode_jitted(**snake_case__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowerCAmelCase : str = encode_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCAmelCase : Any = model_class(snake_case__ ) _lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _lowerCAmelCase : Union[str, Any] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(snake_case__ , snake_case__ , snake_case__ ): return model.decode( decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , ) with self.subTest('JIT Enabled' ): _lowerCAmelCase : Union[str, Any] = decode_jitted(**snake_case__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowerCAmelCase : int = decode_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def a ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: _lowerCAmelCase : Dict = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowerCAmelCase : str = np.ones((1, 1) ) * model.config.eos_token_id _lowerCAmelCase : Any = model(snake_case__ ) self.assertIsNotNone(snake_case__ ) @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' 
) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25} _lowerCAmelCase : Tuple = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True} _lowerCAmelCase : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=snake_case__ ) _lowerCAmelCase : Dict = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' ) _lowerCAmelCase : List[Any] = ['Sam'] _lowerCAmelCase : str = tokenizer(snake_case__ , return_tensors='jax' ) _lowerCAmelCase : Optional[Any] = model.generate(**snake_case__ , **snake_case__ ) _lowerCAmelCase : Any = 'Sam is a great name. It means "sun" in Gaelic.' _lowerCAmelCase : str = tokenizer.batch_decode(snake_case__ , **snake_case__ ) assert generated_txt[0].strip() == tgt_text
25
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
25
1
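The config class above is a standard `PretrainedConfig` subclass, so it can be exercised directly. The snippet below is a minimal usage sketch, assuming the TrajectoryTransformer port is importable from `transformers`; the `attribute_map` declared above is what lets generic aliases read the GPT-style field names.

# Minimal usage sketch (assumes the TrajectoryTransformer config above is importable).
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_layer=6, n_head=8)

# `attribute_map` aliases generic names onto the GPT-style fields:
assert config.num_hidden_layers == config.n_layer == 6
assert config.num_attention_heads == config.n_head == 8
assert config.hidden_size == config.n_embd == 128  # n_embd keeps its default

config.save_pretrained("trajectory_transformer_config")  # writes config.json to that folder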
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' super().__init__() self.register_modules( vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , ) def a ( self , snake_case__ = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _lowerCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case__ ) def a ( self ): '''simple docstring''' self.enable_attention_slicing(snake_case__ ) @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , snake_case__ = None , **snake_case__ , ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Tuple = 1 elif isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : List[str] = len(snake_case__ ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(snake_case__ )}.' 
) # get prompt text embeddings _lowerCAmelCase : Union[str, Any] = self.tokenizer( snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) _lowerCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _lowerCAmelCase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) _lowerCAmelCase : List[str] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: _lowerCAmelCase : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = text_embeddings.shape _lowerCAmelCase : int = text_embeddings.repeat(1 , snake_case__ , 1 ) _lowerCAmelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _lowerCAmelCase : Tuple = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase : List[str] if negative_prompt is None: _lowerCAmelCase : Dict = [''] elif type(snake_case__ ) is not type(snake_case__ ): raise TypeError( F'`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !=' F' {type(snake_case__ )}.' ) elif isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : List[str] = [negative_prompt] elif batch_size != len(snake_case__ ): raise ValueError( F'`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:' F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches' ' the batch size of `prompt`.' ) else: _lowerCAmelCase : Optional[Any] = negative_prompt _lowerCAmelCase : Tuple = text_input_ids.shape[-1] _lowerCAmelCase : Optional[Any] = self.tokenizer( snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , ) _lowerCAmelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _lowerCAmelCase : Dict = uncond_embeddings.shape[1] _lowerCAmelCase : Any = uncond_embeddings.repeat(snake_case__ , snake_case__ , 1 ) _lowerCAmelCase : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_lowerCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) _lowerCAmelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) _lowerCAmelCase : Optional[int] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps _lowerCAmelCase : Tuple = torch.randn( snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(self.device ) _lowerCAmelCase : Optional[Any] = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to( self.device ) else: _lowerCAmelCase : List[str] = torch.randn( snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ ) _lowerCAmelCase : List[str] = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ ) else: if latents_reference.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) _lowerCAmelCase : str = latents_reference.to(self.device ) _lowerCAmelCase : Dict = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images _lowerCAmelCase : Dict = (latents_shape[3] - latents_shape_reference[3]) // 2 _lowerCAmelCase : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2 _lowerCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx _lowerCAmelCase : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy _lowerCAmelCase : int = 0 if dx < 0 else dx _lowerCAmelCase : str = 0 if dy < 0 else dy _lowerCAmelCase : Tuple = max(-dx , 0 ) _lowerCAmelCase : str = max(-dy , 0 ) # import pdb # pdb.set_trace() _lowerCAmelCase : Union[str, Any] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(snake_case__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand _lowerCAmelCase : Dict = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _lowerCAmelCase : Dict = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _lowerCAmelCase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _lowerCAmelCase : Optional[int] = {} if accepts_eta: _lowerCAmelCase : Optional[Any] = eta for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase : List[str] = self.scheduler.scale_model_input(snake_case__ , snake_case__ ) # predict the noise residual _lowerCAmelCase : Union[str, Any] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample # perform guidance if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 ) _lowerCAmelCase : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase : List[Any] = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase : Any = 1 / 0.1_8215 * latents _lowerCAmelCase : Optional[int] = self.vae.decode(snake_case__ ).sample _lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: _lowerCAmelCase : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(snake_case__ ) , return_tensors='pt' ).to( self.device ) _lowerCAmelCase , _lowerCAmelCase : Dict = self.safety_checker( images=snake_case__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: _lowerCAmelCase : Dict = None if output_type == "pil": _lowerCAmelCase : str = self.numpy_to_pil(snake_case__ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
25
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
25
1
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
25
'''simple docstring'''
from math import isqrt


def is_prime(number: int) -> bool:
    """simple docstring"""
    # Trial division up to the integer square root decides primality.
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first difference of consecutive cubes
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next difference of consecutive cubes
    return primes_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
25
1
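The `solution` above counts primes among differences of consecutive cubes: (x + 1)**3 - x**3 = 3*x**2 + 3*x + 1, and the `+= 6 * cube_index` step enumerates exactly those values starting from 7. The following self-check is a sketch, not part of the original file; it confirms the generator against the closed form on the first few candidates.

# Sanity check for the candidate generator used in `solution` above (sketch only).
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


candidates = []
candidate, index = 7, 1
while len(candidates) < 5:
    candidates.append(candidate)
    index += 1
    candidate += 6 * index

# (x + 1)**3 - x**3 = 3*x**2 + 3*x + 1 for x = 1..5
assert candidates == [3 * x * x + 3 * x + 1 for x in range(1, 6)] == [7, 19, 37, 61, 91]
assert [is_prime(c) for c in candidates] == [True, True, True, True, False]  # 91 = 7 * 13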
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : int = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp lowerCAmelCase : List[str] = 5 lowerCAmelCase : Dict = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = SpeechaTextTokenizer __magic_name__ = False __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : Optional[int] = sp.SentencePieceProcessor() spm_model.Load(snake_case__ ) _lowerCAmelCase : Optional[Any] = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )] _lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : Tuple = Path(self.tmpdirname ) save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES['spm_file'] ) _lowerCAmelCase : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = '<pad>' _lowerCAmelCase : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(snake_case__ ) , 1001 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) _lowerCAmelCase : Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [289, 50, 14, 174, 386] , ) _lowerCAmelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual(snake_case__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) _lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : str = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "valhalla/s2t_mustc_multilinguial_medium" __magic_name__ = "C'est trop cool" __magic_name__ = "Esto es genial" @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 1_0000 ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Optional[int] = [ES_CODE, 4, 1601, 47, 7647, 2] _lowerCAmelCase : Any = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = 'fr' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , snake_case__ ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) _lowerCAmelCase : str = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
25
'''simple docstring'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
25
1
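Like the other BART-family configs, `MvpConfig` above can be instantiated directly. The sketch below assumes the MVP model family shipped in `transformers`; it shows the prompt-tuning switches next to the aliased size attributes.

# Sketch: exercising MvpConfig (assumes `transformers` with the MVP models is installed).
from transformers import MvpConfig

# Defaults mirror BART-large sizes; enable lightweight prompt tuning on top:
config = MvpConfig(use_prompt=True, prompt_length=100, prompt_mid_dim=800)

assert config.hidden_size == config.d_model == 1024  # via attribute_map
assert config.num_attention_heads == config.encoder_attention_heads == 16
assert config.decoder_start_token_id == config.forced_eos_token_id == 2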
'''simple docstring''' lowerCAmelCase : Optional[int] = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] lowerCAmelCase : Tuple = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] lowerCAmelCase : Any = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] lowerCAmelCase : List[Any] = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] lowerCAmelCase : Any = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] lowerCAmelCase : List[str] = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] lowerCAmelCase : Union[str, Any] = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] lowerCAmelCase : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
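# A minimal, de-obfuscated sketch of the FFN-size rounding the script above
# performs (its `compute_intermediate_size`): LLaMA uses roughly 8/3 of the
# hidden size for the feed-forward width, scaled by `ffn_dim_multiplier` and
# rounded up to a multiple of `multiple_of`. The 7B/65B hidden sizes below
# are used only as a sanity check against the size table at the top.
def compute_intermediate_size(n: int, ffn_dim_multiplier: float = 1, multiple_of: int = 256) -> int:
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert compute_intermediate_size(4096) == 11008  # 7B
assert compute_intermediate_size(8192) == 22016  # 65B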
25
1
'''simple docstring''' import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline lowerCAmelCase : List[str] = { """n_samples""": 64, """horizon""": 32, """num_inference_steps""": 20, """n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network """scale_grad_by_std""": True, """scale""": 0.1, """eta""": 0.0, """t_grad_cutoff""": 2, """device""": """cpu""", } if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = """hopper-medium-v2""" lowerCAmelCase : Union[str, Any] = gym.make(env_name) lowerCAmelCase : int = ValueGuidedRLPipeline.from_pretrained( """bglick13/hopper-medium-v2-value-function-hor32""", env=env, ) env.seed(0) lowerCAmelCase : List[str] = env.reset() lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : str = 0 lowerCAmelCase : Dict = 10_00 lowerCAmelCase : int = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy lowerCAmelCase : Tuple = pipeline(obs, planning_horizon=32) # execute action in environment lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : str = env.step(denorm_actions) lowerCAmelCase : Optional[Any] = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' F''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) lowerCAmelCase : Optional[Any] = next_observation except KeyboardInterrupt: pass print(F'''Total reward: {total_reward}''')
25
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = 1 __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None def a ( self ): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
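# A small runnable sketch of the deepcopy-based `copy()` idiom the dataclass
# above implements; `TinyConfig` and its fields are made up for illustration.
import copy
from dataclasses import dataclass, field

@dataclass
class TinyConfig:
    name: str = "default"
    tags: list = field(default_factory=list)

    def copy(self) -> "TinyConfig":
        # Rebuild from deep copies so mutable fields are not shared.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = TinyConfig(tags=["x"])
b = a.copy()
b.tags.append("y")
assert a.tags == ["x"] and b.tags == ["x", "y"]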
25
1
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : Optional[Any] = get_logger(__name__) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( os.path.join(snake_case__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) _lowerCAmelCase : Dict = Extractor def a ( self , snake_case__ ): '''simple docstring''' from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" _lowerCAmelCase : Any = os.path.abspath(snake_case__ ) return os.path.join(self.extract_dir , hash_url_to_filename(snake_case__ ) ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' return force_extract or ( not os.path.isfile(snake_case__ ) and not (os.path.isdir(snake_case__ ) and os.listdir(snake_case__ )) ) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' _lowerCAmelCase : str = self.extractor.infer_extractor_format(snake_case__ ) if not extractor_format: return input_path _lowerCAmelCase : Optional[Any] = self._get_output_path(snake_case__ ) if self._do_extract(snake_case__ , snake_case__ ): self.extractor.extract(snake_case__ , snake_case__ , snake_case__ ) return output_path class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @classmethod @abstractmethod def a ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' ... @staticmethod @abstractmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' ... 
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ , 'rb' ) as f: return f.read(snake_case__ ) @classmethod def a ( cls , snake_case__ , snake_case__ = b"" ): '''simple docstring''' if not magic_number: _lowerCAmelCase : List[Any] = max(len(snake_case__ ) for cls_magic_number in cls.magic_numbers ) try: _lowerCAmelCase : List[str] = cls.read_magic_number(snake_case__ , snake_case__ ) except OSError: return False return any(magic_number.startswith(snake_case__ ) for cls_magic_number in cls.magic_numbers ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @classmethod def a ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' return tarfile.is_tarfile(snake_case__ ) @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' def resolved(snake_case__ ) -> str: return os.path.realpath(os.path.abspath(snake_case__ ) ) def badpath(snake_case__ , snake_case__ ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(snake_case__ , snake_case__ ) ).startswith(snake_case__ ) def badlink(snake_case__ , snake_case__ ) -> bool: # Links are interpreted relative to the directory containing the link _lowerCAmelCase : Dict = resolved(os.path.join(snake_case__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=snake_case__ ) _lowerCAmelCase : int = resolved(snake_case__ ) for finfo in members: if badpath(finfo.name , snake_case__ ): logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' ) elif finfo.issym() and badlink(snake_case__ , snake_case__ ): logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' ) elif finfo.islnk() and badlink(snake_case__ , snake_case__ ): logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' ) else: yield finfo @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' os.makedirs(snake_case__ , exist_ok=snake_case__ ) _lowerCAmelCase : Dict = tarfile.open(snake_case__ ) tar_file.extractall(snake_case__ , members=TarExtractor.safemembers(snake_case__ , snake_case__ ) ) tar_file.close() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [b"\x1F\x8B"] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' with gzip.open(snake_case__ , 'rb' ) as gzip_file: with open(snake_case__ , 'wb' ) as extracted_file: shutil.copyfileobj(snake_case__ , snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [ b"PK\x03\x04", b"PK\x05\x06", # empty archive b"PK\x07\x08", # spanned archive ] @classmethod def a ( cls , snake_case__ , snake_case__ = b"" ): '''simple docstring''' if super().is_extractable(snake_case__ , magic_number=snake_case__ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(snake_case__ , 'rb' ) as fp: _lowerCAmelCase : Any = _EndRecData(snake_case__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: _lowerCAmelCase : List[str] = fp.read(snake_case__ ) # CD is where we expect it to be if len(snake_case__ ) == sizeCentralDir: _lowerCAmelCase : Union[str, Any] = struct.unpack(snake_case__ , snake_case__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' os.makedirs(snake_case__ , exist_ok=snake_case__ ) with zipfile.ZipFile(snake_case__ , 'r' ) as zip_file: zip_file.extractall(snake_case__ ) zip_file.close() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [b"\xFD\x37\x7A\x58\x5A\x00"] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' with lzma.open(snake_case__ ) as compressed_file: with open(snake_case__ , 'wb' ) as extracted_file: shutil.copyfileobj(snake_case__ , snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' if not config.RARFILE_AVAILABLE: raise ImportError('Please pip install rarfile' ) import rarfile os.makedirs(snake_case__ , exist_ok=snake_case__ ) _lowerCAmelCase : Any = rarfile.RarFile(snake_case__ ) rf.extractall(snake_case__ ) rf.close() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [b"\x28\xb5\x2F\xFD"] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' if not config.ZSTANDARD_AVAILABLE: raise ImportError('Please pip install zstandard' ) import zstandard as zstd _lowerCAmelCase : Optional[int] = zstd.ZstdDecompressor() with open(snake_case__ , 'rb' ) as ifh, open(snake_case__ , 'wb' ) as ofh: dctx.copy_stream(snake_case__ , snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [b"\x42\x5A\x68"] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' with bza.open(snake_case__ , 'rb' ) as compressed_file: with open(snake_case__ , 'wb' ) as extracted_file: shutil.copyfileobj(snake_case__ , snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [b"\x37\x7A\xBC\xAF\x27\x1C"] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' if not config.PY7ZR_AVAILABLE: raise ImportError('Please pip install py7zr' ) import pyazr os.makedirs(snake_case__ , exist_ok=snake_case__ ) with pyazr.SevenZipFile(snake_case__ , 'r' ) as archive: archive.extractall(snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 
[b"\x04\x22\x4D\x18"] @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' if not config.LZ4_AVAILABLE: raise ImportError('Please pip install lz4' ) import lza.frame with lza.frame.open(snake_case__ , 'rb' ) as compressed_file: with open(snake_case__ , 'wb' ) as extracted_file: shutil.copyfileobj(snake_case__ , snake_case__ ) class UpperCamelCase__ : """simple docstring""" __magic_name__ = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def a ( cls ): '''simple docstring''' return max( len(snake_case__ ) for extractor in cls.extractors.values() if issubclass(snake_case__ , snake_case__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' try: return MagicNumberBaseExtractor.read_magic_number(snake_case__ , magic_number_length=snake_case__ ) except OSError: return b"" @classmethod def a ( cls , snake_case__ , snake_case__ = False ): '''simple docstring''' warnings.warn( 'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ' 'Use \'infer_extractor_format\' instead.' , category=snake_case__ , ) _lowerCAmelCase : Optional[int] = cls.infer_extractor_format(snake_case__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def a ( cls , snake_case__ ): # <Added version="2.4.0"/> '''simple docstring''' _lowerCAmelCase : Any = cls._get_magic_number_max_length() _lowerCAmelCase : Tuple = cls._read_magic_number(snake_case__ , snake_case__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(snake_case__ , magic_number=snake_case__ ): return extractor_format @classmethod def a ( cls , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = "deprecated" , ): '''simple docstring''' os.makedirs(os.path.dirname(snake_case__ ) , exist_ok=snake_case__ ) # Prevent parallel extractions _lowerCAmelCase : Optional[Any] = str(Path(snake_case__ ).with_suffix('.lock' ) ) with FileLock(snake_case__ ): shutil.rmtree(snake_case__ , ignore_errors=snake_case__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(snake_case__ , snake_case__ ): # passed as positional arg warnings.warn( 'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ' 'Use \'extractor_format\' instead.' , category=snake_case__ , ) _lowerCAmelCase : Tuple = extractor if extractor != 'deprecated' else extractor_format else: _lowerCAmelCase : int = cls.extractors[extractor_format] return extractor.extract(snake_case__ , snake_case__ ) else: warnings.warn( 'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ' 'exception in 3.0.0.' , category=snake_case__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(snake_case__ ): return extractor.extract(snake_case__ , snake_case__ )
25
'''simple docstring''' lowerCAmelCase : List[str] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCAmelCase : List[str] = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
25
1
'''simple docstring''' from math import factorial class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = real if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : str = [1] * rank else: _lowerCAmelCase : List[Any] = rank def __repr__( self ): '''simple docstring''' return ( F'{self.real}+' F'{"+".join(str(snake_case__ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , snake_case__ ) def __add__( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): return Dual(self.real + other , self.duals ) _lowerCAmelCase : List[str] = self.duals.copy() _lowerCAmelCase : Optional[Any] = other.duals.copy() if len(snake_case__ ) > len(snake_case__ ): o_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) ) elif len(snake_case__ ) < len(snake_case__ ): s_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) ) _lowerCAmelCase : Union[str, Any] = [] for i in range(len(snake_case__ ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , snake_case__ ) __magic_name__ = __add__ def __sub__( self , snake_case__ ): '''simple docstring''' return self + other * -1 def __mul__( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , snake_case__ ) _lowerCAmelCase : int = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , snake_case__ ) __magic_name__ = __mul__ def __truediv__( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Tuple = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , snake_case__ ) raise ValueError def __floordiv__( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Any = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , snake_case__ ) raise ValueError def __pow__( self , snake_case__ ): '''simple docstring''' if n < 0 or isinstance(snake_case__ , snake_case__ ): raise ValueError('power must be a positive integer' ) if n == 0: return 1 if n == 1: return self _lowerCAmelCase : Tuple = self for _ in range(n - 1 ): x *= self return x def lowercase (_A , _A , _A ): """simple docstring""" if not callable(_A ): raise ValueError('differentiate() requires a function as input for func' ) if not isinstance(_A , (float, int) ): raise ValueError('differentiate() requires a float as input for position' ) if not isinstance(_A , _A ): raise ValueError('differentiate() requires an int as input for order' ) _lowerCAmelCase : str = Dual(_A , 1 ) _lowerCAmelCase : int = func(_A ) if order == 0: return result.real return result.duals[order - 1] * factorial(_A ) if __name__ == "__main__": import doctest doctest.testmod() def lowercase (_A ): """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
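# A compact sketch of the forward-mode idea the Dual class implements: carry
# (value, derivative) pairs and apply the product rule on multiply. This toy
# version tracks only the first derivative, unlike the arbitrary-rank class
# above.
class FirstOrderDual:
    def __init__(self, value, deriv=0.0):
        self.value, self.deriv = value, deriv

    def __add__(self, other):
        other = other if isinstance(other, FirstOrderDual) else FirstOrderDual(other)
        return FirstOrderDual(self.value + other.value, self.deriv + other.deriv)

    def __mul__(self, other):
        other = other if isinstance(other, FirstOrderDual) else FirstOrderDual(other)
        # product rule: (uv)' = u'v + uv'
        return FirstOrderDual(
            self.value * other.value,
            self.deriv * other.value + self.value * other.deriv,
        )

x = FirstOrderDual(3.0, 1.0)  # seed dx/dx = 1
y = x * x * x                 # y = x**3
assert (y.value, y.deriv) == (27.0, 27.0)  # dy/dx = 3 * x**2 = 27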
25
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
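# A tiny sketch of the lazy-module pattern these __init__ files rely on:
# attributes resolve to real imports on first access rather than at import
# time. The stdlib `json` module stands in for a heavyweight submodule.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = TinyLazyModule("toy", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'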
25
1
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = size if size is not None else {'height': 20, 'width': 20} _lowerCAmelCase : int = crop_size if crop_size is not None else {'height': 18, 'width': 18} _lowerCAmelCase : Union[str, Any] = parent _lowerCAmelCase : Any = batch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : List[str] = image_size _lowerCAmelCase : Optional[Any] = min_resolution _lowerCAmelCase : Optional[int] = max_resolution _lowerCAmelCase : Union[str, Any] = do_resize _lowerCAmelCase : Any = size _lowerCAmelCase : int = do_center_crop _lowerCAmelCase : int = crop_size _lowerCAmelCase : Union[str, Any] = do_normalize _lowerCAmelCase : Optional[int] = image_mean _lowerCAmelCase : int = image_std _lowerCAmelCase : Tuple = do_reduce_labels def a ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowercase (): """simple docstring""" _lowerCAmelCase : Union[str, Any] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) _lowerCAmelCase : Union[str, Any] = Image.open(dataset[0]['file'] ) _lowerCAmelCase : Any = Image.open(dataset[1]['file'] ) return image, map def lowercase (): """simple docstring""" _lowerCAmelCase : int = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) _lowerCAmelCase : str = Image.open(ds[0]['file'] ) _lowerCAmelCase : List[str] = Image.open(ds[1]['file'] ) _lowerCAmelCase : int = Image.open(ds[2]['file'] ) _lowerCAmelCase : Dict = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = BeitImageProcessor if is_vision_available() else None def a ( self ): '''simple docstring''' _lowerCAmelCase : str = BeitImageProcessingTester(self ) @property def a ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , 'do_resize' ) ) self.assertTrue(hasattr(snake_case__ , 'size' ) ) self.assertTrue(hasattr(snake_case__ , 'do_center_crop' ) ) self.assertTrue(hasattr(snake_case__ , 'center_crop' ) ) self.assertTrue(hasattr(snake_case__ , 'do_normalize' ) ) self.assertTrue(hasattr(snake_case__ , 'image_mean' ) ) self.assertTrue(hasattr(snake_case__ , 'image_std' ) ) def a ( self ): '''simple docstring''' 
_lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 20, 'width': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) self.assertEqual(image_processor.do_reduce_labels , snake_case__ ) _lowerCAmelCase : List[str] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=snake_case__ ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) self.assertEqual(image_processor.do_reduce_labels , snake_case__ ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input _lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Tuple = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input _lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Union[str, Any] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input _lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Union[str, Any] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) _lowerCAmelCase : List[Any] = [] for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input _lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test batched _lowerCAmelCase : List[Any] = image_processing(snake_case__ , snake_case__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test not batched input (PIL images) _lowerCAmelCase , _lowerCAmelCase : List[Any] = prepare_semantic_single_inputs() _lowerCAmelCase : Any = image_processing(snake_case__ , snake_case__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test batched input (PIL images) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = prepare_semantic_batch_inputs() _lowerCAmelCase : Dict = image_processing(snake_case__ , snake_case__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = 
self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 _lowerCAmelCase , _lowerCAmelCase : Optional[int] = prepare_semantic_single_inputs() _lowerCAmelCase : int = image_processing(snake_case__ , snake_case__ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 150 ) _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : Any = image_processing(snake_case__ , snake_case__ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 )
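# A numpy sketch of the label-reduction behavior the last test exercises
# (assumed from its assertions): class id 0 (background) maps to the ignore
# index 255 and every remaining id shifts down by one.
import numpy as np

def reduce_label(segmentation: np.ndarray, ignore_index: int = 255) -> np.ndarray:
    out = segmentation.astype(np.int64).copy()
    out[out == 0] = ignore_index   # background becomes the ignore index
    out = out - 1                  # shift the remaining class ids down
    out[out == ignore_index - 1] = ignore_index
    return out

seg = np.array([[0, 1], [150, 0]])
assert reduce_label(seg).tolist() == [[255, 0], [149, 255]]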
25
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
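# A quick check of the `hidden_size` formula in this config: the channel
# dimension doubles once per stage, so with the default embed_dim=64 and the
# four stages above the final width is 64 * 2**3.
embed_dim, depths = 64, [3, 4, 6, 5]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 512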
25
1
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = r'\w+[.]\d+' _lowerCAmelCase : int = re.findall(_A , _A ) for pat in pats: _lowerCAmelCase : Optional[Any] = key.replace(_A , '_'.join(pat.split('.' ) ) ) return key def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ('scale',) if ( any('norm' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): _lowerCAmelCase : str = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: _lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: _lowerCAmelCase : int = pt_tuple_key[:-1] + ('embedding',) return renamed_pt_tuple_key, pt_tensor # conv layer _lowerCAmelCase : Optional[int] = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: _lowerCAmelCase : Any = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _lowerCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight": _lowerCAmelCase : Optional[int] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowercase (_A , _A , _A=4_2 ): """simple docstring""" _lowerCAmelCase : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params _lowerCAmelCase : List[Any] = flax_model.init_weights(PRNGKey(_A ) ) _lowerCAmelCase : List[Any] = flatten_dict(_A ) _lowerCAmelCase : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _lowerCAmelCase : Union[str, Any] = rename_key(_A ) _lowerCAmelCase : List[Any] = tuple(renamed_pt_key.split('.' ) ) # Correctly rename weight parameters _lowerCAmelCase , _lowerCAmelCase : Tuple = rename_key_and_reshape_tensor(_A , _A , _A ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # also add unexpected weight so that warning is thrown _lowerCAmelCase : Dict = jnp.asarray(_A ) return unflatten_dict(_A )
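# A numpy sketch of the two tensor reshapes `rename_key_and_reshape_tensor`
# applies: PyTorch conv kernels (out, in, h, w) become Flax kernels
# (h, w, in, out), and linear weights are transposed to (in, out).
import numpy as np

pt_conv = np.zeros((8, 3, 5, 5))           # (out_ch, in_ch, kh, kw)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # -> (kh, kw, in_ch, out_ch)
assert flax_conv.shape == (5, 5, 3, 8)

pt_linear = np.zeros((16, 32))             # (out_features, in_features)
flax_kernel = pt_linear.T                  # -> (in_features, out_features)
assert flax_kernel.shape == (32, 16)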
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
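# A plain-Python sketch of the special-token layouts the tokenizer methods
# above construct: <s> A </s> for one sequence and <s> A </s></s> B </s> for
# a pair, with all-zero token type ids. Ids 0 and 2 are the usual RoBERTa
# <s>/</s> ids, assumed here for illustration.
BOS_ID, EOS_ID = 0, 2

def build_inputs(ids_a, ids_b=None):
    output = [BOS_ID] + ids_a + [EOS_ID]
    if ids_b is not None:
        output += [EOS_ID] + ids_b + [EOS_ID]
    return output

assert build_inputs([10, 11]) == [0, 10, 11, 2]
assert build_inputs([10], [20]) == [0, 10, 2, 2, 20, 2]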
25
1
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration lowerCAmelCase : List[Any] = 5_00_00 lowerCAmelCase : Dict = 50_00 lowerCAmelCase , lowerCAmelCase : Optional[Any] = os.path.split(__file__) lowerCAmelCase : Dict = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def lowercase (_A , _A ): """simple docstring""" for i in range(_A ): _lowerCAmelCase : List[str] = dataset[i] @get_duration def lowercase (_A , _A , _A ): """simple docstring""" for i in range(0 , len(_A ) , _A ): _lowerCAmelCase : List[Any] = dataset[i : i + batch_size] @get_duration def lowercase (_A , _A , _A ): """simple docstring""" with dataset.formatted_as(type=_A ): for i in range(_A ): _lowerCAmelCase : List[Any] = dataset[i] @get_duration def lowercase (_A , _A , _A , _A ): """simple docstring""" with dataset.formatted_as(type=_A ): for i in range(0 , _A , _A ): _lowerCAmelCase : List[Any] = dataset[i : i + batch_size] def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = {'num examples': SPEED_TEST_N_EXAMPLES} _lowerCAmelCase : Tuple = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0_0}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0_0_0}), ] _lowerCAmelCase : Union[str, Any] = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0_0}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0_0_0}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) _lowerCAmelCase : Tuple = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) _lowerCAmelCase : Union[str, Any] = generate_example_dataset( os.path.join(_A , 'dataset.arrow' ) , _A , num_examples=_A , seq_shapes={'list': (1_0_0,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(_A ) ) _lowerCAmelCase : List[str] = func(_A , **_A ) print('shuffling dataset' ) _lowerCAmelCase : Optional[int] = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(_A ) ) _lowerCAmelCase : List[Any] = func( _A , **_A ) with open(_A , 'wb' ) as f: f.write(json.dumps(_A ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
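# A likely shape for the `get_duration` helper this benchmark imports from
# `utils` (the real implementation is not shown here, so this is an assumed
# reconstruction): time the wrapped call and return the elapsed seconds.
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper

@get_duration
def busy_loop(n):
    sum(range(n))

assert busy_loop(10_000) >= 0.0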
25
'''simple docstring''' lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag. lowerCAmelCase : Optional[int] = 1 # The second color of the flag. lowerCAmelCase : int = 2 # The third color of the flag. lowerCAmelCase : Any = (red, white, blue) def lowercase (_A ): """simple docstring""" if not sequence: return [] if len(_A ) == 1: return list(_A ) _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : List[str] = len(_A ) - 1 _lowerCAmelCase : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid] high -= 1 else: _lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contains only {colors} values' raise ValueError(_A ) return sequence if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip() lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")] print(F'''{dutch_national_flag_sort(unsorted)}''')
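# A de-obfuscated worked run of the single-pass, three-pointer partition
# above: `low`/`mid`/`high` sweep once, swapping 0s left and 2s right. This
# sketch assumes the input contains only the three flag values and omits the
# ValueError branch.
def dutch_national_flag_sort(sequence):
    seq = list(sequence)
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:
            mid += 1
        else:  # seq[mid] == 2
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]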
25
1
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def lowercase (_A ): """simple docstring""" if hor == 1_2_8: _lowerCAmelCase : Dict = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') _lowerCAmelCase : str = (3_2, 1_2_8, 2_5_6) _lowerCAmelCase : str = ('UpResnetBlock1D', 'UpResnetBlock1D') elif hor == 3_2: _lowerCAmelCase : Union[str, Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') _lowerCAmelCase : Tuple = (3_2, 6_4, 1_2_8, 2_5_6) _lowerCAmelCase : Union[str, Any] = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D') _lowerCAmelCase : Optional[int] = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' ) _lowerCAmelCase : Optional[int] = model.state_dict() _lowerCAmelCase : Any = { 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'up_block_types': up_block_types, 'layers_per_block': 1, 'use_timestep_embedding': True, 'out_block_type': 'OutConv1DBlock', 'norm_num_groups': 8, 'downsample_each_block': False, 'in_channels': 1_4, 'out_channels': 1_4, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'flip_sin_to_cos': False, 'freq_shift': 1, 'sample_size': 6_5_5_3_6, 'mid_block_type': 'MidResTemporalBlock1D', 'act_fn': 'mish', } _lowerCAmelCase : List[Any] = UNetaDModel(**_A ) print(f'length of state dict: {len(state_dict.keys() )}' ) print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' ) _lowerCAmelCase : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): _lowerCAmelCase : str = state_dict.pop(_A ) hf_value_function.load_state_dict(_A ) torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' ) with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , 'w' ) as f: json.dump(_A , _A ) def lowercase (): """simple docstring""" _lowerCAmelCase : Dict = { 'in_channels': 1_4, 'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'), 'up_block_types': (), 'out_block_type': 'ValueFunction', 'mid_block_type': 'ValueFunctionMidBlock1D', 'block_out_channels': (3_2, 6_4, 1_2_8, 2_5_6), 'layers_per_block': 1, 'downsample_each_block': True, 'sample_size': 6_5_5_3_6, 'out_channels': 1_4, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'use_timestep_embedding': True, 'flip_sin_to_cos': False, 'freq_shift': 1, 'norm_num_groups': 8, 'act_fn': 'mish', } _lowerCAmelCase : Optional[Any] = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' ) _lowerCAmelCase : str = model _lowerCAmelCase : Optional[int] = UNetaDModel(**_A ) print(f'length of state dict: {len(state_dict.keys() )}' ) print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' ) _lowerCAmelCase : int = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): _lowerCAmelCase : List[str] = state_dict.pop(_A ) hf_value_function.load_state_dict(_A ) torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' ) with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f: 
json.dump(_A , _A ) if __name__ == "__main__": unet(32) # unet(128) value_function()
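# The rename step in both converters above zips the source and target state
# dict keys, which only works because the two models enumerate parameters in
# the same order; a tiny illustration with made-up keys.
src = {"blocks.0.w": 1, "blocks.0.b": 2}
dst_keys = ["down.0.weight", "down.0.bias"]
mapping = dict(zip(src.keys(), dst_keys))
renamed = {mapping[k]: v for k, v in src.items()}
assert renamed == {"down.0.weight": 1, "down.0.bias": 2}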
25
'''simple docstring''' def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1] _lowerCAmelCase : int = 6 _lowerCAmelCase : Dict = 1 _lowerCAmelCase : Optional[int] = 1_9_0_1 _lowerCAmelCase : Optional[Any] = 0 while year < 2_0_0_1: day += 7 if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] elif day > 2_9 and month == 2: month += 1 _lowerCAmelCase : List[str] = day - 2_9 else: if day > days_per_month[month - 1]: month += 1 _lowerCAmelCase : List[str] = day - days_per_month[month - 2] if month > 1_2: year += 1 _lowerCAmelCase : Optional[int] = 1 if year < 2_0_0_1 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
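# A stdlib cross-check of the calendar walk above: count the months in
# 1901-2000 whose first day falls on a Sunday (weekday() == 6).
import datetime

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6
)
assert sundays == 171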
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase : Optional[Any] = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
'''simple docstring''' def lowercase (_A = 1_0_0_0_0_0_0 ): """simple docstring""" _lowerCAmelCase : Any = set(range(3 , _A , 2 ) ) primes.add(2 ) for p in range(3 , _A , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , _A , _A ) ) ) _lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )] for p in primes: for n in range(_A , limit + 1 , _A ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
25
1
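The solution above computes the totient summatory function: sieve the primes up to the limit, scale phi[n] by (1 - 1/p) for every prime p dividing n, and sum phi(2..limit). The float multiplication is fast but can lose precision for very large limits; an integer-only variant of the same sieve, shown as an alternative sketch:

def totient_sum(limit: int) -> int:
    # phi[n] starts at n; when p is found prime, subtract phi[m] // p for every multiple m.
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # still untouched, so p is prime
            for m in range(p, limit + 1, p):
                phi[m] -= phi[m] // p
    return sum(phi[2:])

print(totient_sum(8))  # 21, the count of reduced proper fractions with denominator <= 8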
'''simple docstring''' class UpperCamelCase__ : """simple docstring""" def __init__( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : Optional[Any] = 0 _lowerCAmelCase : str = {} def a ( self , snake_case__ ): '''simple docstring''' if vertex not in self.adjacency: _lowerCAmelCase : List[Any] = {} self.num_vertices += 1 def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' self.add_vertex(snake_case__ ) self.add_vertex(snake_case__ ) if head == tail: return _lowerCAmelCase : Optional[int] = weight _lowerCAmelCase : List[str] = weight def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.get_edges() for edge in edges: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = edge edges.remove((tail, head, weight) ) for i in range(len(snake_case__ ) ): _lowerCAmelCase : Optional[int] = list(edges[i] ) edges.sort(key=lambda snake_case__ : e[2] ) for i in range(len(snake_case__ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _lowerCAmelCase : str = edges[i][2] + 1 for edge in edges: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge _lowerCAmelCase : str = weight _lowerCAmelCase : Optional[Any] = weight def __str__( self ): '''simple docstring''' _lowerCAmelCase : int = '' for tail in self.adjacency: for head in self.adjacency[tail]: _lowerCAmelCase : Tuple = self.adjacency[head][tail] string += F'{head} -> {tail} == {weight}\n' return string.rstrip('\n' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def a ( self ): '''simple docstring''' return self.adjacency.keys() @staticmethod def a ( snake_case__=None , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Dict = Graph() if vertices is None: _lowerCAmelCase : Optional[Any] = [] if edges is None: _lowerCAmelCase : List[str] = [] for vertex in vertices: g.add_vertex(snake_case__ ) for edge in edges: g.add_edge(*snake_case__ ) return g class UpperCamelCase__ : """simple docstring""" def __init__( self ): '''simple docstring''' _lowerCAmelCase : Tuple = {} _lowerCAmelCase : List[Any] = {} def __len__( self ): '''simple docstring''' return len(self.parent ) def a ( self , snake_case__ ): '''simple docstring''' if item in self.parent: return self.find(snake_case__ ) _lowerCAmelCase : List[str] = item _lowerCAmelCase : Tuple = 0 return item def a ( self , snake_case__ ): '''simple docstring''' if item not in self.parent: return self.make_set(snake_case__ ) if item != self.parent[item]: _lowerCAmelCase : Optional[int] = self.find(self.parent[item] ) return self.parent[item] def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.find(snake_case__ ) _lowerCAmelCase : str = self.find(snake_case__ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _lowerCAmelCase : Tuple = roota return roota if self.rank[roota] < self.rank[roota]: _lowerCAmelCase : str = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _lowerCAmelCase : List[Any] = roota return roota return None @staticmethod def a ( snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = graph.num_vertices _lowerCAmelCase : str = Graph.UnionFind() _lowerCAmelCase : Optional[int] = [] while num_components > 1: _lowerCAmelCase : List[Any] = {} for vertex in graph.get_vertices(): _lowerCAmelCase : int = -1 
_lowerCAmelCase : str = graph.get_edges() for edge in edges: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge edges.remove((tail, head, weight) ) for edge in edges: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = edge _lowerCAmelCase : List[Any] = union_find.find(snake_case__ ) _lowerCAmelCase : Dict = union_find.find(snake_case__ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _lowerCAmelCase : int = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _lowerCAmelCase : Any = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = cheap_edge[vertex] if union_find.find(snake_case__ ) != union_find.find(snake_case__ ): union_find.union(snake_case__ , snake_case__ ) mst_edges.append(cheap_edge[vertex] ) _lowerCAmelCase : Optional[int] = num_components - 1 _lowerCAmelCase : Optional[Any] = Graph.build(edges=snake_case__ ) return mst
25
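The Graph class above implements Borůvka's minimum spanning tree: each round, every component records its cheapest outgoing edge, the union-find merges the endpoints, and the component count drops until one tree remains. (Two quirks worth noting: edges.remove() is called while iterating over the same list, which is fragile in Python, and the obfuscation has collapsed the two union roots into one name.) The algorithm in compact form, as an independent sketch:

def boruvka_mst(n, edges):
    # n vertices labeled 0..n-1; edges are (u, v, weight) tuples of a connected graph.
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst, components = [], n
    while components > 1:
        cheapest = [None] * n
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                if cheapest[ru] is None or w < cheapest[ru][2]:
                    cheapest[ru] = (u, v, w)
                if cheapest[rv] is None or w < cheapest[rv][2]:
                    cheapest[rv] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                if find(u) != find(v):
                    parent[find(u)] = find(v)  # union
                    mst.append(edge)
                    components -= 1
    return mst

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 3)]))  # weight-4 tree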
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
25
1
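The init-sorting utility above splits each __init__.py into indentation blocks, then reorders names inside _import_structure by category: uppercase constants first, then classes, then functions, each group sorted case-insensitively with underscores ignored. That grouping rule in isolation:

def sort_objects(objects):
    key = lambda name: name.lower().replace("_", "")
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)

print(sort_objects(["load_tool", "BertModel", "BERT_PRETRAINED_LIST", "Agent", "cached_file"]))
# ['BERT_PRETRAINED_LIST', 'Agent', 'BertModel', 'cached_file', 'load_tool']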
'''simple docstring''' import sys import turtle def lowercase (_A , _A ): """simple docstring""" return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowercase (_A , _A , _A , _A , ): """simple docstring""" my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(_A , get_mid(_A , _A ) , get_mid(_A , _A ) , depth - 1 ) triangle(_A , get_mid(_A , _A ) , get_mid(_A , _A ) , depth - 1 ) triangle(_A , get_mid(_A , _A ) , get_mid(_A , _A ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( """Correct format for using this script: """ """python fractals.py <int:depth_for_fractal>""" ) lowerCAmelCase : str = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("""red""") lowerCAmelCase : Optional[int] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
25
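The turtle script above draws a Sierpiński triangle by recursing on the three corner sub-triangles formed with edge midpoints; note that the obfuscation has merged the two midpoint arguments into one name, so read get_mid as taking two distinct points. The recursion separated from the drawing, counting the smallest triangles instead:

def midpoint(p, q):
    return ((p[0] + q[0]) / 2, (p[1] + q[1]) / 2)

def sierpinski(a, b, c, depth):
    # Returns the list of depth-0 triangles making up the fractal.
    if depth == 0:
        return [(a, b, c)]
    ab, bc, ca = midpoint(a, b), midpoint(b, c), midpoint(c, a)
    return (
        sierpinski(a, ab, ca, depth - 1)
        + sierpinski(ab, b, bc, depth - 1)
        + sierpinski(ca, bc, c, depth - 1)
    )

tris = sierpinski((-175, -125), (0, 175), (175, -125), 3)
print(len(tris))  # 3**3 == 27 triangles at depth 3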
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
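The inpainting test above compares a 3x3 corner slice of the generated image against hard-coded values, which only stays stable because every random source is seeded; it builds a torch.Generator per device, with a global-seed fallback for MPS. That seeding pattern on its own:

import torch

def make_generator(device, seed):
    # MPS historically lacked device-local generators, hence the fallback.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", 0)
print(torch.rand(2, generator=gen))  # identical on every run with the same seed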
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
'''simple docstring''' from __future__ import annotations from typing import Any def lowercase (_A ): """simple docstring""" if not postfix_notation: return 0 _lowerCAmelCase : int = {'+', '-', '*', '/'} _lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_A ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
25
1
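The evaluator above walks a postfix token list with a stack, popping two operands per operator; its division branch adjusts Python's floor division so the result truncates toward zero, as in C. The same stack discipline in bare form:

def eval_postfix(tokens):
    stack = []
    for tok in tokens:
        if tok in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()  # right operand sits on top
            if tok == "+":
                stack.append(a + b)
            elif tok == "-":
                stack.append(a - b)
            elif tok == "*":
                stack.append(a * b)
            else:
                stack.append(int(a / b))  # truncate toward zero
        else:
            stack.append(int(tok))
    return stack.pop()

print(eval_postfix(["3", "4", "+", "2", "*"]))  # (3 + 4) * 2 == 14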
'''simple docstring''' import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowerCAmelCase : Tuple = """src/diffusers""" lowerCAmelCase : Dict = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowerCAmelCase : Union[str, Any] = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowerCAmelCase : List[Any] = spec.loader.load_module() def lowercase (_A , _A ): """simple docstring""" return line.startswith(_A ) or len(_A ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , _A ) is not None def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Optional[Any] = object_name.split('.' ) _lowerCAmelCase : Optional[Any] = 0 # First let's find the module where our object lives. _lowerCAmelCase : Dict = parts[i] while i < len(_A ) and not os.path.isfile(os.path.join(_A , f'{module}.py' ) ): i += 1 if i < len(_A ): _lowerCAmelCase : Dict = os.path.join(_A , parts[i] ) if i >= len(_A ): raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' ) with open(os.path.join(_A , f'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCAmelCase : List[Any] = f.readlines() # Now let's find the class / func in the code! _lowerCAmelCase : Dict = '' _lowerCAmelCase : Tuple = 0 for name in parts[i + 1 :]: while ( line_index < len(_A ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_A ): raise ValueError(f' {object_name} does not match any function or class in {module}.' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). _lowerCAmelCase : Tuple = line_index while line_index < len(_A ) and _should_continue(lines[line_index] , _A ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 _lowerCAmelCase : Dict = lines[start_index:line_index] return "".join(_A ) lowerCAmelCase : Union[str, Any] = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowerCAmelCase : Optional[int] = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowerCAmelCase : str = re.compile(r"""<FILL\s+[^>]*>""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Optional[int] = code.split('\n' ) _lowerCAmelCase : Optional[Any] = 0 while idx < len(_A ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_A ): return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0] return "" def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Optional[Any] = len(get_indent(_A ) ) > 0 if has_indent: _lowerCAmelCase : int = f'class Bla:\n{code}' _lowerCAmelCase : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=_A ) _lowerCAmelCase : List[str] = black.format_str(_A , mode=_A ) _lowerCAmelCase , _lowerCAmelCase : List[str] = style_docstrings_in_code(_A ) return result[len('class Bla:\n' ) :] if has_indent else result def lowercase (_A , _A=False ): """simple docstring""" with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCAmelCase : str = f.readlines() _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : str = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(_A ): _lowerCAmelCase : List[str] = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = search.groups() _lowerCAmelCase : Optional[int] = find_code_in_diffusers(_A ) _lowerCAmelCase : List[Any] = get_indent(_A ) _lowerCAmelCase : str = line_index + 1 if indent == theoretical_indent else line_index + 2 _lowerCAmelCase : Union[str, Any] = theoretical_indent _lowerCAmelCase : Dict = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. _lowerCAmelCase : Optional[int] = True while line_index < len(_A ) and should_continue: line_index += 1 if line_index >= len(_A ): break _lowerCAmelCase : Any = lines[line_index] _lowerCAmelCase : Optional[int] = _should_continue(_A , _A ) and re.search(f'^{indent}# End copy' , _A ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _lowerCAmelCase : Union[str, Any] = lines[start_index:line_index] _lowerCAmelCase : Optional[int] = ''.join(_A ) # Remove any nested `Copied from` comments to avoid circular copies _lowerCAmelCase : List[Any] = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(_A ) is None] _lowerCAmelCase : List[Any] = '\n'.join(_A ) # Before comparing, use the `replace_pattern` on the original code. if len(_A ) > 0: _lowerCAmelCase : List[Any] = replace_pattern.replace('with' , '' ).split(',' ) _lowerCAmelCase : Union[str, Any] = [_re_replace_pattern.search(_A ) for p in patterns] for pattern in patterns: if pattern is None: continue _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = pattern.groups() _lowerCAmelCase : Optional[Any] = re.sub(_A , _A , _A ) if option.strip() == "all-casing": _lowerCAmelCase : Any = re.sub(obja.lower() , obja.lower() , _A ) _lowerCAmelCase : Optional[int] = re.sub(obja.upper() , obja.upper() , _A ) # Blackify after replacement. 
To be able to do that, we need the header (class or function definition) # from the previous line _lowerCAmelCase : Any = blackify(lines[start_index - 1] + theoretical_code ) _lowerCAmelCase : Optional[int] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: _lowerCAmelCase : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] _lowerCAmelCase : str = start_index + 1 if overwrite and len(_A ) > 0: # Warn the user a file has been modified. print(f'Detected changes, rewriting {filename}.' ) with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(_A ) return diffs def lowercase (_A = False ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = glob.glob(os.path.join(_A , '**/*.py' ) , recursive=_A ) _lowerCAmelCase : Union[str, Any] = [] for filename in all_files: _lowerCAmelCase : int = is_copy_consistent(_A , _A ) diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs] if not overwrite and len(_A ) > 0: _lowerCAmelCase : Any = '\n'.join(_A ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowerCAmelCase : Tuple = parser.parse_args() check_copies(args.fix_and_overwrite)
25
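The consistency checker above re-extracts the code a `# Copied from diffusers.<path>` comment points at, applies any `old->new` rename patterns, formats the result with black, and diffs it against the copy. The rename step in isolation, mirroring the sample's regex and sequential substitution (the pattern string here is a made-up example; order matters when one replacement's output contains another pattern):

import re

_re_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")

def apply_patterns(code, replace_pattern):
    # replace_pattern looks like "with GPT2->Llama,gpt2->llama"
    for part in replace_pattern.replace("with", "").split(","):
        m = _re_pattern.search(part)
        if m is None:
            continue
        old, new, option = m.groups()
        code = re.sub(old, new, code)
        if option.strip() == "all-casing":
            code = re.sub(old.lower(), new.lower(), code)
            code = re.sub(old.upper(), new.upper(), code)
    return code

print(apply_patterns("class GPT2Model: pass  # gpt2 variant", "with GPT2->Llama,gpt2->llama"))
# class LlamaModel: pass  # llama variant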
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mobilenet_v2" def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Union[str, Any] = image_size _lowerCAmelCase : List[Any] = depth_multiplier _lowerCAmelCase : List[Any] = depth_divisible_by _lowerCAmelCase : Optional[Any] = min_depth _lowerCAmelCase : str = expand_ratio _lowerCAmelCase : str = output_stride _lowerCAmelCase : Any = first_layer_is_expansion _lowerCAmelCase : int = finegrained_output _lowerCAmelCase : str = hidden_act _lowerCAmelCase : List[str] = tf_padding _lowerCAmelCase : Optional[int] = classifier_dropout_prob _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : str = semantic_loss_ignore_index class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = version.parse("1.11" ) @property def a ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a ( self ): '''simple docstring''' return 1E-4
25
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Any = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase : Dict = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", } } lowerCAmelCase : str = { """camembert-base""": 5_12, } lowerCAmelCase : Dict = """▁""" class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=["<s>NOTUSED", "</s>NOTUSED"] , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) _lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(snake_case__ ) ) _lowerCAmelCase : Any = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> _lowerCAmelCase : List[Any] = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} _lowerCAmelCase : Tuple = len(self.fairseq_tokens_to_ids ) _lowerCAmelCase : Dict = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) _lowerCAmelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase : str = [self.cls_token_id] _lowerCAmelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def a ( self ): '''simple docstring''' return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def a ( self ): '''simple docstring''' _lowerCAmelCase : 
Optional[Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(snake_case__ ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : List[str] = '' _lowerCAmelCase : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : List[str] = True _lowerCAmelCase : Optional[int] = [] else: current_sub_tokens.append(snake_case__ ) _lowerCAmelCase : Union[str, Any] = False out_string += self.sp_model.decode(snake_case__ ) return out_string.strip() def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.__dict__.copy() _lowerCAmelCase : Any = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : Optional[int] = {} _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : Any = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,)
25
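The CamemBERT tokenizer above reserves the first four ids for fairseq's special tokens and shifts every sentencepiece id by that offset, mapping sentencepiece's unknown piece (id 0) to the fairseq <unk>. The id arithmetic with a toy vocabulary:

fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def token_to_id(token, sp_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    sp_id = sp_piece_to_id.get(token, 0)
    if sp_id == 0:  # sentencepiece unk -> fairseq unk
        return fairseq_tokens_to_ids["<unk>"]
    return sp_id + fairseq_offset

toy_sp = {"<unk>": 0, "▁le": 5, "▁chat": 9}  # hypothetical sentencepiece ids
print(token_to_id("▁chat", toy_sp))    # 13
print(token_to_id("inconnu", toy_sp))  # 3, the fairseq unk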
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
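The test above exercises a simple priority rule for choosing a framework: an explicit argument wins, then whatever checkpoint files a local directory contains, then the installed libraries with PyTorch preferred over TensorFlow. The decision logic as a standalone sketch (file names follow the usual pytorch_model.bin / tf_model.h5 checkpoint conventions):

import os

def determine_framework(model_path, framework=None, torch_available=True, tf_available=True):
    if framework is not None:
        return framework  # the caller's choice always wins
    if os.path.isdir(model_path):
        if os.path.isfile(os.path.join(model_path, "pytorch_model.bin")):
            return "pt"
        if os.path.isfile(os.path.join(model_path, "tf_model.h5")):
            return "tf"
        raise FileNotFoundError(f"No checkpoint found in {model_path}")
    if torch_available:
        return "pt"  # both installed -> prefer PyTorch
    if tf_available:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is installed.")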
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Dict = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mgp-str" def __init__( self , snake_case__=[32, 128] , snake_case__=4 , snake_case__=3 , snake_case__=27 , snake_case__=38 , snake_case__=5_0257 , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=4.0 , snake_case__=True , snake_case__=False , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=False , snake_case__=0.02 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : int = image_size _lowerCAmelCase : int = patch_size _lowerCAmelCase : Optional[int] = num_channels _lowerCAmelCase : Optional[Any] = max_token_length _lowerCAmelCase : Optional[int] = num_character_labels _lowerCAmelCase : Optional[Any] = num_bpe_labels _lowerCAmelCase : Tuple = num_wordpiece_labels _lowerCAmelCase : Dict = hidden_size _lowerCAmelCase : Optional[Any] = num_hidden_layers _lowerCAmelCase : Tuple = num_attention_heads _lowerCAmelCase : List[Any] = mlp_ratio _lowerCAmelCase : int = distilled _lowerCAmelCase : Dict = layer_norm_eps _lowerCAmelCase : Union[str, Any] = drop_rate _lowerCAmelCase : Union[str, Any] = qkv_bias _lowerCAmelCase : Any = attn_drop_rate _lowerCAmelCase : Optional[Any] = drop_path_rate _lowerCAmelCase : int = output_aa_attentions _lowerCAmelCase : Optional[int] = initializer_range
25
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
25
1
"""Pure-Python SHA-1 implementation, checked against hashlib in the test below."""
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct


class SHA1Hash:
    """Computes the SHA-1 digest of a bytestring."""

    def __init__(self, data):
        self.data = data
        # Initial hash state (five 32-bit words) as defined by the SHA-1 spec
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation of n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message to a multiple of 64 bytes, appending the bit length
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded message into 64-byte blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
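# A hedged usage sketch for SHA1Hash, cross-checked against hashlib (kept in a
# comment so it does not interfere with the CLI entry point above; the sample
# message is illustrative):
#
#     sample = b"The quick brown fox jumps over the lazy dog"
#     assert SHA1Hash(sample).final_hash() == hashlib.sha1(sample).hexdigest()
#     # -> 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12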
25
"""Check that the custom kernel/extension files are present in a build or install."""
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Return True only if every file in FILES_TO_FIND exists under transformers_path."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--check_lib",
        action="store_true",
        help="Whether to check the build or the actual package.",
    )
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"

    if not test_custom_files_are_present(transformers_path):
        raise ValueError(
            "The built release does not contain the custom files. Fix this before going further!"
        )
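# Example invocations (hedged: the script path is an assumption based on where such
# checks usually live, and the default mode assumes `python setup.py build` has run):
#
#     python utils/check_build.py              # check build/lib/transformers
#     python utils/check_build.py --check_lib  # check the installed package instead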
25
1
'''simple docstring''' import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename lowerCAmelCase : Tuple = """http://www.mocksite.com/file1.txt""" lowerCAmelCase : Any = """\"text\": [\"foo\", \"foo\"]""" lowerCAmelCase : Tuple = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8""" class UpperCamelCase__ : """simple docstring""" __magic_name__ = 2_0_0 __magic_name__ = {"Content-Length": "100"} __magic_name__ = {} def a ( self , **snake_case__ ): '''simple docstring''' return [bytes(snake_case__ , 'utf-8' )] def lowercase (*_A , **_A ): """simple docstring""" return MockResponse() @pytest.mark.parametrize('urls_type' , [str, list, dict] ) def lowercase (_A , _A , _A ): """simple docstring""" import requests monkeypatch.setattr(_A , 'request' , _A ) _lowerCAmelCase : Optional[Any] = URL if issubclass(_A , _A ): _lowerCAmelCase : List[str] = url elif issubclass(_A , _A ): _lowerCAmelCase : str = [url] elif issubclass(_A , _A ): _lowerCAmelCase : Tuple = {'train': url} _lowerCAmelCase : Tuple = 'dummy' _lowerCAmelCase : Optional[int] = 'downloads' _lowerCAmelCase : int = tmp_path _lowerCAmelCase : Dict = DownloadConfig( cache_dir=os.path.join(_A , _A ) , use_etag=_A , ) _lowerCAmelCase : str = DownloadManager(dataset_name=_A , download_config=_A ) _lowerCAmelCase : int = dl_manager.download(_A ) _lowerCAmelCase : Optional[int] = urls for downloaded_paths in [downloaded_paths]: if isinstance(_A , _A ): _lowerCAmelCase : Tuple = [downloaded_paths] _lowerCAmelCase : Optional[Any] = [urls] elif isinstance(_A , _A ): assert "train" in downloaded_paths.keys() _lowerCAmelCase : int = downloaded_paths.values() _lowerCAmelCase : Dict = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(_A , _A ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _lowerCAmelCase : Union[str, Any] = Path(_A ) _lowerCAmelCase : Optional[int] = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _lowerCAmelCase : Optional[Any] = downloaded_path.read_text() assert content == CONTENT _lowerCAmelCase : int = downloaded_path.with_suffix('.json' ) assert metadata_downloaded_path.exists() _lowerCAmelCase : List[str] = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('paths_type' , [str, list, dict] ) def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase : List[Any] = str(_A ) if issubclass(_A , _A ): _lowerCAmelCase : Any = filename elif issubclass(_A , _A ): _lowerCAmelCase : int = [filename] elif issubclass(_A , _A ): _lowerCAmelCase : int = {'train': filename} _lowerCAmelCase : Tuple = 'dummy' _lowerCAmelCase : int = xz_file.parent _lowerCAmelCase : List[str] = 'extracted' _lowerCAmelCase : List[str] = DownloadConfig( cache_dir=_A , use_etag=_A , ) _lowerCAmelCase : Optional[Any] = DownloadManager(dataset_name=_A , download_config=_A ) _lowerCAmelCase : Union[str, Any] = dl_manager.extract(_A ) _lowerCAmelCase : List[str] = paths for extracted_paths in [extracted_paths]: if isinstance(_A , _A ): _lowerCAmelCase : Tuple = [extracted_paths] _lowerCAmelCase : Optional[int] = [paths] elif isinstance(_A , _A ): assert "train" in extracted_paths.keys() _lowerCAmelCase : str = extracted_paths.values() _lowerCAmelCase : List[str] = 
paths.values() assert extracted_paths for extracted_path, input_path in zip(_A , _A ): assert extracted_path == dl_manager.extracted_paths[input_path] _lowerCAmelCase : Any = Path(_A ) _lowerCAmelCase : Any = extracted_path.parts assert parts[-1] == hash_url_to_filename(_A , etag=_A ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _lowerCAmelCase : Any = extracted_path.read_text() _lowerCAmelCase : Union[str, Any] = text_file.read_text() assert extracted_file_content == expected_file_content def lowercase (_A , _A ): """simple docstring""" assert path.endswith('.jsonl' ) for num_items, line in enumerate(_A , start=1 ): _lowerCAmelCase : Any = json.loads(line.decode('utf-8' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[str] = request.getfixturevalue(_A ) _lowerCAmelCase : Union[str, Any] = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_A ) , start=1 ): _test_jsonl(_A , _A ) assert num_jsonl == 2 @pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[Any] = request.getfixturevalue(_A ) _lowerCAmelCase : List[str] = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_A ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_A ) , start=1 ): _test_jsonl(_A , _A ) assert num_tar == 1 assert num_jsonl == 2 def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Tuple = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(_A ) , start=1 ): assert os.path.basename(_A ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
25
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: return the longest palindromic substring in O(n).

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this one
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
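# Hedged cross-check of palindromic_string against a brute-force O(n^3) reference;
# the helper name is illustrative and not part of the original module.
def brute_force_longest_palindrome(s: str) -> str:
    """Return the longest palindromic substring by checking every substring."""
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best


if __name__ == "__main__":
    for word in ["abbbaba", "forgeeksskeegfor", "abc"]:
        assert len(palindromic_string(word)) == len(brute_force_longest_palindrome(word))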
25
1
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integer and half-integer arguments, computed
    via the recurrence gamma(num) = (num - 1) * gamma(num - 1)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if not num:  # entering 0 exits cleanly instead of hitting the domain check
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
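# A couple of hedged spot checks for gamma above (integer arguments reduce to
# factorials, gamma(n) == (n - 1)!):
#
#     assert gamma(5) == 24.0              # 4! == 24
#     assert gamma(1.5) == 0.5 * sqrt(pi)  # gamma(3/2) == sqrt(pi) / 2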
25
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = 0 __magic_name__ = False __magic_name__ = 3.0 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , snake_case__ ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00) lowerCAmelCase : List[str] = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : List[Any] = """""" lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
25
1
"""Fast tokenization class for ConvBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the normalizer if the saved backend state disagrees with the
        # requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
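# Usage sketch (hedged: downloading the YituTech checkpoint requires network access):
#
#     from transformers import ConvBertTokenizerFast
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     encoded = tokenizer("Hello world")
#     # encoded.input_ids correspond to [CLS] hello world [SEP], per
#     # build_inputs_with_special_tokens above.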
25
"""TrajectoryTransformer model configuration."""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
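# Minimal configuration sketch for the class above (the overridden values are
# illustrative; everything else keeps the defaults from __init__):
#
#     config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
#     print(config.block_size, config.vocab_size)  # 249 100
#     print(config.hidden_size)                    # 64, via the attribute_map alias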
25
1
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=0.6 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Dict = parent _lowerCAmelCase : List[str] = batch_size _lowerCAmelCase : Optional[int] = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : List[Any] = num_channels _lowerCAmelCase : Optional[int] = is_training _lowerCAmelCase : Tuple = use_labels _lowerCAmelCase : Any = hidden_size _lowerCAmelCase : List[Any] = num_hidden_layers _lowerCAmelCase : str = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : List[Any] = hidden_dropout_prob _lowerCAmelCase : str = attention_probs_dropout_prob _lowerCAmelCase : Optional[Any] = type_sequence_label_size _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : int = mask_ratio _lowerCAmelCase : List[str] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _lowerCAmelCase : str = (image_size // patch_size) ** 2 _lowerCAmelCase : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : int = None if self.use_labels: _lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Dict = self.get_config() return config, pixel_values, labels def a ( self ): '''simple docstring''' return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = 
TFViTMAEModel(config=snake_case__ ) _lowerCAmelCase : List[Any] = model(snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = TFViTMAEForPreTraining(snake_case__ ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ , training=snake_case__ ) # expected sequence length = num_patches _lowerCAmelCase : int = (self.image_size // self.patch_size) ** 2 _lowerCAmelCase : Dict = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _lowerCAmelCase : Tuple = 1 _lowerCAmelCase : Any = TFViTMAEForPreTraining(snake_case__ ) _lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase : Dict = model(snake_case__ , training=snake_case__ ) _lowerCAmelCase : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : List[str] = config_and_inputs _lowerCAmelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __magic_name__ = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = TFViTMAEModelTester(self ) _lowerCAmelCase : Tuple = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : str = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _lowerCAmelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : int = model_class(snake_case__ ) _lowerCAmelCase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : Tuple = [*signature.parameters.keys()] _lowerCAmelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def a ( self ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Any = int((config.image_size // config.patch_size) ** 2 ) _lowerCAmelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = model_class(snake_case__ ) _lowerCAmelCase : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[int] = model(snake_case__ , noise=snake_case__ ) _lowerCAmelCase : int = copy.deepcopy(self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : int = model(**snake_case__ , noise=snake_case__ ) _lowerCAmelCase : str = outputs_dict[0].numpy() _lowerCAmelCase : int = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def a ( self ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : int = int((config.image_size // config.patch_size) ** 2 ) _lowerCAmelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(snake_case__ ): _lowerCAmelCase : Union[str, Any] = {} for k, v in inputs_dict.items(): if tf.is_tensor(snake_case__ ): _lowerCAmelCase : Tuple = v.numpy() else: _lowerCAmelCase : int = np.array(snake_case__ ) return inputs_np_dict for model_class in self.all_model_classes: _lowerCAmelCase : List[Any] = model_class(snake_case__ ) _lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : int = prepare_numpy_arrays(snake_case__ ) _lowerCAmelCase : Tuple = model(snake_case__ , noise=snake_case__ ) _lowerCAmelCase : Optional[int] = model(**snake_case__ , noise=snake_case__ ) self.assert_outputs_same(snake_case__ , snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase : int = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) _lowerCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowerCAmelCase : Optional[int] = tf.constant(snake_case__ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _lowerCAmelCase : Union[str, Any] = tf_noise super().check_pt_tf_models(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(snake_case__ ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(snake_case__ , snake_case__ ),) if isinstance(snake_case__ , snake_case__ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(snake_case__ , '_keras_serializable' , snake_case__ ) } _lowerCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) _lowerCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowerCAmelCase : str = tf.convert_to_tensor(snake_case__ ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: _lowerCAmelCase : List[Any] = main_layer_class(snake_case__ ) _lowerCAmelCase : Dict = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } _lowerCAmelCase : List[Any] = tf.keras.Model(snake_case__ , outputs=main_layer(snake_case__ ) ) _lowerCAmelCase : Tuple = model(snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase : str = os.path.join(snake_case__ , 'keras_model.h5' ) model.save(snake_case__ ) _lowerCAmelCase : Optional[int] = tf.keras.models.load_model( snake_case__ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(snake_case__ , tf.keras.Model ) _lowerCAmelCase : List[str] = model(snake_case__ ) self.assert_outputs_same(snake_case__ , snake_case__ ) @slow def a ( self ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) _lowerCAmelCase : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = model_class(snake_case__ ) _lowerCAmelCase : str = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : str = model(snake_case__ , noise=snake_case__ ) if model_class.__name__ == "TFViTMAEModel": _lowerCAmelCase : Tuple = outputs.last_hidden_state.numpy() _lowerCAmelCase : int = 0 else: _lowerCAmelCase : Optional[int] = outputs.logits.numpy() _lowerCAmelCase : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case__ , saved_model=snake_case__ ) _lowerCAmelCase : List[Any] = model_class.from_pretrained(snake_case__ ) _lowerCAmelCase : List[Any] = model(snake_case__ , noise=snake_case__ ) if model_class.__name__ == "TFViTMAEModel": _lowerCAmelCase : int = after_outputs['last_hidden_state'].numpy() _lowerCAmelCase : Dict = 0 else: _lowerCAmelCase : Tuple = after_outputs['logits'].numpy() _lowerCAmelCase : List[Any] = 0 _lowerCAmelCase : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case__ , 1E-5 ) def a ( self ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) _lowerCAmelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = model_class(snake_case__ ) _lowerCAmelCase : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : Tuple = model(snake_case__ , noise=snake_case__ ) _lowerCAmelCase : Union[str, Any] = model.get_config() # make sure that returned config is jsonifiable, which is required by 
keras json.dumps(snake_case__ ) _lowerCAmelCase : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config _lowerCAmelCase : str = model_class.from_config(model.config ) _lowerCAmelCase : int = new_model(snake_case__ ) # Build model new_model.set_weights(model.get_weights() ) _lowerCAmelCase : List[str] = new_model(snake_case__ , noise=snake_case__ ) self.assert_outputs_same(snake_case__ , snake_case__ ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def a ( self ): '''simple docstring''' pass @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def a ( self ): '''simple docstring''' np.random.seed(2 ) _lowerCAmelCase : Optional[int] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) _lowerCAmelCase : str = self.default_image_processor _lowerCAmelCase : List[Any] = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _lowerCAmelCase : Dict = ViTMAEConfig() _lowerCAmelCase : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _lowerCAmelCase : Dict = np.random.uniform(size=(1, num_patches) ) # forward pass _lowerCAmelCase : Dict = model(**snake_case__ , noise=snake_case__ ) # verify the logits _lowerCAmelCase : Tuple = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : int = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case__ , atol=1E-4 )
25
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase : Union[str, Any] = 25_00_04 lowerCAmelCase : int = 25_00_20 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MBartaaTokenizer __magic_name__ = MBartaaTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = '<s>' _lowerCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case__ ) , 1054 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ ) _lowerCAmelCase : Any = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def a ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : Optional[int] = tempfile.mkdtemp() _lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = "facebook/mbart-large-50-one-to-many-mmt" __magic_name__ = [ " UN Chief Says 
There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __magic_name__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def a ( cls ): '''simple docstring''' _lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) _lowerCAmelCase : Dict = 1 return cls def a ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def a ( self ): '''simple docstring''' self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) _lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] _lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) _lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , snake_case__ ) _lowerCAmelCase : List[str] = 10 _lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[0] , snake_case__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() _lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) _lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() 
== [2, RO_CODE] @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) _lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' ) _lowerCAmelCase : str = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' ) _lowerCAmelCase : List[Any] = targets['input_ids'] _lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(snake_case__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_0004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, } , )
25
1
def largest_square_area_in_matrix_top_down_approach(rows, cols, mat):
    """Plain recursion over (row, col); exponential without memoisation.

    >>> largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row, col) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows, cols, mat):
    """Top-down recursion with a memoisation table.

    >>> largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row, col, dp_array) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows, cols, mat):
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows, cols, mat):
    """Bottom-up DP keeping only two rows of the table.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # snapshot this row (a copy, so the next pass reads the previous row's values)
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
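# Hedged sanity check that the four implementations above agree (the matrix is
# illustrative; the expected answer is 3, the side of the 3x3 block of ones in
# the lower right):
if __name__ == "__main__":
    sample_matrix = [
        [1, 1, 0, 1],
        [1, 1, 1, 1],
        [0, 1, 1, 1],
        [0, 1, 1, 1],
    ]
    results = {
        largest_square_area_in_matrix_top_down_approach(4, 4, sample_matrix),
        largest_square_area_in_matrix_top_down_approach_with_dp(4, 4, sample_matrix),
        largest_square_area_in_matrix_bottom_up(4, 4, sample_matrix),
        largest_square_area_in_matrix_bottom_up_space_optimization(4, 4, sample_matrix),
    }
    assert results == {3}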
25
"""Count primes below a bound among differences of consecutive cubes."""
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number); assumes number >= 2."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime of the form (n + 1)**3 - n**3 == 3*n*n + 3*n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1)**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next cube difference, built incrementally
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
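# Hedged spot check: the candidates generated above are 7, 19, 37, 61, 91, ...;
# below 100 the primes among them are 7, 19, 37 and 61, so solution(100) is 4.
if __name__ == "__main__":
    assert solution(100) == 4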
25
1
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } lowerCAmelCase : Optional[int] = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def lowercase (_A , _A , _A , _A , _A ): """simple docstring""" for attribute in key.split('.' ): _lowerCAmelCase : Tuple = getattr(_A , _A ) if weight_type is not None: _lowerCAmelCase : Dict = getattr(_A , _A ).shape else: _lowerCAmelCase : Any = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _lowerCAmelCase : Tuple = value elif weight_type == "weight_g": _lowerCAmelCase : Any = value elif weight_type == "weight_v": _lowerCAmelCase : List[str] = value elif weight_type == "bias": _lowerCAmelCase : int = value else: _lowerCAmelCase : str = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[str] = [] _lowerCAmelCase : int = fairseq_model.state_dict() _lowerCAmelCase : int = hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowerCAmelCase : Any = False if "conv_layers" in name: load_conv_layer( _A , _A , _A , _A , hf_model.config.feat_extract_norm == 'group' , ) _lowerCAmelCase : Dict = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _lowerCAmelCase : Tuple = True if "*" in mapped_key: _lowerCAmelCase : Optional[Any] = name.split(_A )[0].split('.' 
)[-2] _lowerCAmelCase : str = mapped_key.replace('*' , _A ) if "weight_g" in name: _lowerCAmelCase : List[str] = 'weight_g' elif "weight_v" in name: _lowerCAmelCase : Optional[int] = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: _lowerCAmelCase : str = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCAmelCase : Optional[int] = 'weight' else: _lowerCAmelCase : Union[str, Any] = None set_recursively(_A , _A , _A , _A , _A ) continue if not is_used: unused_weights.append(_A ) logger.warning(f'Unused weights: {unused_weights}' ) def lowercase (_A , _A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : List[str] = full_name.split('conv_layers.' )[-1] _lowerCAmelCase : Tuple = name.split('.' ) _lowerCAmelCase : List[str] = int(items[0] ) _lowerCAmelCase : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _lowerCAmelCase : Any = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _lowerCAmelCase : Tuple = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) _lowerCAmelCase : List[str] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) _lowerCAmelCase : List[str] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(_A ) @torch.no_grad() def lowercase (_A , _A , _A=None ): """simple docstring""" _lowerCAmelCase : str = torch.load(_A ) _lowerCAmelCase : Optional[Any] = WavLMConfigOrig(checkpoint['cfg'] ) _lowerCAmelCase : Union[str, Any] = WavLMOrig(_A ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: _lowerCAmelCase : List[str] = WavLMConfig.from_pretrained(_A ) else: _lowerCAmelCase : Optional[Any] = WavLMConfig() _lowerCAmelCase : Optional[int] = WavLMModel(_A ) recursively_load_weights(_A , _A ) hf_wavlm.save_pretrained(_A ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCAmelCase : List[str] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
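# The converter above repeatedly walks dotted keys such as
# "encoder.layers.0.attention.k_proj.weight" down a module tree with getattr
# before assigning the tensor. A minimal, self-contained sketch of that
# traversal pattern (the toy model here is invented for illustration):
import torch
import torch.nn as nn


def set_by_dotted_key(root: nn.Module, key: str, value: torch.Tensor) -> None:
    *parents, leaf = key.split(".")
    pointer = root
    for attribute in parents:
        # nn.Module attribute lookup resolves child modules, so numeric names work too.
        pointer = getattr(pointer, attribute)
    # Copy in place so the Parameter object (and anything holding a reference) survives.
    getattr(pointer, leaf).data.copy_(value)


toy = nn.Sequential(nn.Linear(4, 4))
set_by_dotted_key(toy, "0.weight", torch.zeros(4, 4))
assert toy[0].weight.abs().sum().item() == 0.0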
25
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : List[Any] = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "mvp" __magic_name__ = ["past_key_values"] __magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Optional[Any] = d_model _lowerCAmelCase : Optional[int] = encoder_ffn_dim _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = encoder_attention_heads _lowerCAmelCase : Any = decoder_ffn_dim _lowerCAmelCase : Optional[Any] = decoder_layers _lowerCAmelCase : int = decoder_attention_heads _lowerCAmelCase : Union[str, Any] = dropout _lowerCAmelCase : List[Any] = attention_dropout _lowerCAmelCase : List[str] = activation_dropout _lowerCAmelCase : Optional[Any] = activation_function _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = encoder_layerdrop _lowerCAmelCase : Union[str, Any] = decoder_layerdrop _lowerCAmelCase : Optional[int] = classifier_dropout _lowerCAmelCase : List[Any] = use_cache _lowerCAmelCase : Optional[int] = encoder_layers _lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase : Optional[Any] = use_prompt _lowerCAmelCase : Optional[Any] = prompt_length _lowerCAmelCase : Any = prompt_mid_dim super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ): _lowerCAmelCase : Any = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
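# Usage sketch for the config class above (MvpConfig ships with transformers;
# the sizes below are arbitrary illustration values, not the published ones):
from transformers import MvpConfig

config = MvpConfig(d_model=256, encoder_layers=2, decoder_layers=2,
                   encoder_attention_heads=4, decoder_attention_heads=4)
print(config.hidden_size)          # 256 -- resolved through the attribute map to d_model
print(config.num_attention_heads)  # 4   -- resolved to encoder_attention_heads
config.save_pretrained("./mvp-small")               # writes config.json
reloaded = MvpConfig.from_pretrained("./mvp-small")
assert reloaded.d_model == 256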
25
1
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : str = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = XGLMTokenizer __magic_name__ = XGLMTokenizerFast __magic_name__ = True __magic_name__ = True def a ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : List[str] = XGLMTokenizer(snake_case__ , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = '<pad>' _lowerCAmelCase : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(snake_case__ ) , 1008 ) def a ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = XGLMTokenizer(snake_case__ , keep_accents=snake_case__ ) _lowerCAmelCase : Union[str, Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCAmelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def a ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def a ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(snake_case__ , f.name ) _lowerCAmelCase : int = XGLMTokenizer(f.name , keep_accents=snake_case__ ) _lowerCAmelCase : Any = pickle.dumps(snake_case__ ) pickle.loads(snake_case__ ) def a ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return _lowerCAmelCase : List[str] = self.get_tokenizer() _lowerCAmelCase : Any = self.get_rust_tokenizer() _lowerCAmelCase : Tuple = 'I was born in 92000, and this is falsé.' _lowerCAmelCase : List[Any] = tokenizer.tokenize(snake_case__ ) _lowerCAmelCase : Dict = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Union[str, Any] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : List[str] = self.get_rust_tokenizer() _lowerCAmelCase : int = tokenizer.encode(snake_case__ ) _lowerCAmelCase : List[Any] = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 'Hello World!' _lowerCAmelCase : Optional[Any] = [2, 3_1227, 4447, 35] self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off _lowerCAmelCase : int = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = { 'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name='facebook/xglm-564M' , padding=snake_case__ , )
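# The expectations above add `tokenizer.fairseq_offset` to every raw
# SentencePiece id: the first ids are reserved for control tokens and the
# underlying SentencePiece vocabulary is shifted up to make room. A toy
# sketch of that remapping (the offset value and control-token table are
# illustrative assumptions, not read from the checkpoint):
FAIRSEQ_OFFSET = 1
CONTROL_TOKENS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}


def sp_to_model_id(sp_id: int) -> int:
    # Shift a raw SentencePiece id into the model's id space.
    return sp_id + FAIRSEQ_OFFSET


raw_ids = [285, 46, 10, 170, 382]  # SentencePiece ids for "This is a test"
print([sp_to_model_id(i) for i in raw_ids])  # ids the wrapped tokenizer reports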
25
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = { """7B""": 1_10_08, """13B""": 1_38_24, """30B""": 1_79_20, """65B""": 2_20_16, """70B""": 2_86_72, } lowerCAmelCase : Optional[int] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowercase (_A , _A=1 , _A=2_5_6 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def lowercase (_A , _A , _A , _A=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) _lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) ) _lowerCAmelCase : List[str] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['n_layers'] _lowerCAmelCase : Optional[int] = params['n_heads'] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : Optional[int] = params['dim'] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : Union[str, Any] = 10_000.0 _lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA _lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Optional[int] = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : Union[str, Any] = n_heads _lowerCAmelCase : Any = n_heads_per_shard _lowerCAmelCase : Optional[Any] = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'Fetching all parameters from the checkpoint at {input_base_path}.' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded _lowerCAmelCase : List[Any] = [ torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' ) for i in range(_A ) ] _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Union[str, Any] = {'weight_map': {}} for layer_i in range(_A ): _lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : str = { f'model.layers.{layer_i}.self_attn.q_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wq.weight'] ), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute( loaded[f'layers.{layer_i}.attention.wk.weight'] ), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. _lowerCAmelCase : str = { f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][ f'layers.{layer_i}.attention_norm.weight' ].clone(), f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][ f'layers.{layer_i}.ffn_norm.weight' ].clone(), } _lowerCAmelCase : List[str] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) _lowerCAmelCase : Optional[int] = permute( torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wk.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) _lowerCAmelCase : Dict = torch.cat( [ loaded[i][f'layers.{layer_i}.attention.wv.weight'].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) _lowerCAmelCase : Dict = torch.cat( [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : Tuple = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 ) _lowerCAmelCase : int = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : Optional[Any] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) _lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin' if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: _lowerCAmelCase : 
List[str] = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs _lowerCAmelCase : Tuple = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) _lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 _lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6 _lowerCAmelCase : List[Any] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' ) _lowerCAmelCase : List[Any] = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase (): """simple docstring""" _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) _lowerCAmelCase : Any = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
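# Standalone demonstration of the `permute` helper used above when converting
# LLaMA attention weights: it moves Meta's interleaved rotary pair layout to
# the half-split layout expected downstream. Tensor sizes here are toy values.
import torch


def permute(w, n_heads, dim1, dim2):
    # Group each head's rows into (pairs, 2), swap the axes, flatten back:
    # interleaved rotary pairs become two contiguous half-blocks per head.
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)


w = torch.arange(8.0).reshape(8, 1)  # one head, head_dim 8; rows are indices
print(permute(w, n_heads=1, dim1=8, dim2=1).flatten().tolist())
# [0.0, 2.0, 4.0, 6.0, 1.0, 3.0, 5.0, 7.0] -- even rows first, then odd rows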
25
1
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def a ( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='tf' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
25
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = 1 __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None def a ( self ): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
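# The lone method on the dataclass above is a field-wise deep copy; note the
# comprehension must deep-copy the value `v` it iterates over. A minimal
# sketch of the intended pattern (the two fields are invented examples --
# the original field names are not recoverable from this dump):
import copy
from dataclasses import dataclass, field


@dataclass
class ExampleConfig:
    output_dir: str = "."
    tags: list = field(default_factory=list)

    def copy(self) -> "ExampleConfig":
        # Rebuild the class from deep copies so mutable fields are not shared.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


a = ExampleConfig(tags=["baseline"])
b = a.copy()
b.tags.append("v2")
assert a.tags == ["baseline"] and b.tags == ["baseline", "v2"]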
25
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", 
"""scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Dict = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = vocab_file _lowerCAmelCase : int = False if not self.vocab_file else True _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Optional[Any] = src_lang _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : Optional[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[str] = src_lang _lowerCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : Dict = [] _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : int = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[str] = [self.eos_token_id] _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
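# In use, the language plumbing above reduces to picking src_lang/tgt_lang and
# letting the tokenizer swap its prefix/suffix language tokens. A short usage
# sketch against the published checkpoint (the generate call is left commented
# because it needs a loaded NLLB model):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
inputs = tokenizer("The cat sat on the mat.", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))  # eng_Latn appears among the specials

tgt_id = tokenizer.convert_tokens_to_ids("fra_Latn")
# model.generate(**inputs, forced_bos_token_id=tgt_id)  # forces French as the first generated token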
25
'''simple docstring''' lowerCAmelCase : List[str] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCAmelCase : List[str] = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
25
1
'''simple docstring'''
def solution (_A = 5_0 ):
    """simple docstring"""
    # ways_number[n] counts the tilings of a length-n row (Project Euler 114).
    ways_number = [1] * (_A + 1)
    for row_length in range(3 , _A + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[_A]


if __name__ == "__main__":
    print(F'''{solution() = }''')
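# Cross-check of the dynamic programme above (Project Euler 114: red blocks of
# length >= 3 separated by at least one black square) against a direct
# recursive count for small rows. The brute-force helper is ours, not part of
# the original file; F(7) == 17 matches the problem statement.
from functools import lru_cache


@lru_cache(maxsize=None)
def brute_force(remaining: int) -> int:
    if remaining < 0:
        return 0
    # Next square black, or a block of length >= 3 starts here and is followed
    # by a separator (unless it exactly fills the rest of the row).
    total = 1 if remaining == 0 else brute_force(remaining - 1)
    for block in range(3, remaining + 1):
        total += 1 if block == remaining else brute_force(remaining - block - 1)
    return total


print([brute_force(n) for n in range(3, 8)])  # [2, 4, 7, 11, 17]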
25
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
25
1
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__) def lowercase (_A , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = False , ): """simple docstring""" _lowerCAmelCase : str = bnb_quantization_config.load_in_abit _lowerCAmelCase : Dict = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' ) _lowerCAmelCase : Union[str, Any] = [] # custom device map if isinstance(_A , _A ) and len(device_map.keys() ) > 1: _lowerCAmelCase : List[Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: _lowerCAmelCase : Optional[Any] = get_keys_to_not_convert(_A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_A ) _lowerCAmelCase : List[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : List[str] = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_A ) # compatibility with peft _lowerCAmelCase : Tuple = load_in_abit _lowerCAmelCase : Union[str, Any] = load_in_abit _lowerCAmelCase : Dict = get_parameter_device(_A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' ) _lowerCAmelCase : Any = replace_with_bnb_layers(_A , _A , modules_to_not_convert=_A ) # convert param to the right dtype _lowerCAmelCase : Optional[int] = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: _lowerCAmelCase : str = name.replace('.weight' , '' ).replace('.bias' , '' ) _lowerCAmelCase : List[str] = getattr(_A , _A , _A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_A ): param.to(_A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. 
A GPU is needed for quantization.' ) logger.info( f'The model device type is {model_device.type}. However, cuda is needed for quantization.' 'We move the model to cuda.' ) return model elif weights_location is None: raise RuntimeError( f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): _lowerCAmelCase : Optional[Any] = replace_with_bnb_layers( _A , _A , modules_to_not_convert=_A ) _lowerCAmelCase : Union[str, Any] = get_quantized_model_device_map( _A , _A , _A , max_memory=_A , no_split_module_classes=_A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): _lowerCAmelCase : List[str] = True _lowerCAmelCase : Optional[Any] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _A , _A , _A , dtype=bnb_quantization_config.torch_dtype , offload_folder=_A , offload_state_dict=_A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_A , device_map=_A , offload_dir=_A ) def lowercase (_A , _A , _A=None , _A=None , _A=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): _lowerCAmelCase : int = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' ) if isinstance(_A , _A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) _lowerCAmelCase : Dict = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) _lowerCAmelCase : Union[str, Any] = {} _lowerCAmelCase : Optional[int] = special_dtypes _lowerCAmelCase : Optional[Any] = no_split_module_classes _lowerCAmelCase : Dict = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": _lowerCAmelCase : Optional[Any] = get_balanced_memory( _A , low_zero=(device_map == 'balanced_low_0') , max_memory=_A , **_A , ) _lowerCAmelCase : Tuple = max_memory _lowerCAmelCase : List[str] = infer_auto_device_map(_A , **_A ) if isinstance(_A , _A ): # check if don't have any quantized module on the cpu _lowerCAmelCase : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules _lowerCAmelCase : Any = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. 
Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def lowercase (_A , _A , _A=None , _A=None ): """simple docstring""" if modules_to_not_convert is None: _lowerCAmelCase : str = [] _lowerCAmelCase , _lowerCAmelCase : List[Any] = _replace_with_bnb_layers( _A , _A , _A , _A ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def lowercase (_A , _A , _A=None , _A=None , ): """simple docstring""" _lowerCAmelCase : List[str] = False for name, module in model.named_children(): if current_key_name is None: _lowerCAmelCase : str = [] current_key_name.append(_A ) if isinstance(_A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` _lowerCAmelCase : int = '.'.join(_A ) _lowerCAmelCase : Tuple = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: _lowerCAmelCase : str = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: _lowerCAmelCase : List[Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: _lowerCAmelCase : Any = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) _lowerCAmelCase : List[str] = module.weight.data if module.bias is not None: _lowerCAmelCase : str = module.bias.data bnb_module.requires_grad_(_A ) setattr(_A , _A , _A ) _lowerCAmelCase : int = True if len(list(module.children() ) ) > 0: _lowerCAmelCase , _lowerCAmelCase : List[Any] = _replace_with_bnb_layers( _A , _A , _A , _A ) _lowerCAmelCase : Union[str, Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowercase (_A ): """simple docstring""" with init_empty_weights(): _lowerCAmelCase : Any = deepcopy(_A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` _lowerCAmelCase : Optional[Any] = find_tied_parameters(_A ) # For compatibility with Accelerate < 0.18 if isinstance(_A , _A ): _lowerCAmelCase : Dict = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _lowerCAmelCase : Any = sum(_A , [] ) _lowerCAmelCase : List[str] = len(_A ) > 0 # Check if it is a base model _lowerCAmelCase : str = False if hasattr(_A , 'base_model_prefix' ): _lowerCAmelCase : List[str] = not hasattr(_A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _lowerCAmelCase : str = list(model.named_children() ) _lowerCAmelCase : Optional[int] = [list_modules[-1][0]] # add last module together with tied weights _lowerCAmelCase : int = set(_A ) - set(_A ) _lowerCAmelCase : Union[str, Any] = list(set(_A ) ) + list(_A ) # remove ".weight" from the keys _lowerCAmelCase : List[Any] = ['.weight', '.bias'] _lowerCAmelCase : Any = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _lowerCAmelCase : Dict = name.replace(_A , '' ) filtered_module_names.append(_A ) return filtered_module_names def lowercase (_A ): """simple docstring""" for m in model.modules(): if isinstance(_A , bnb.nn.Linearabit ): return True return False def lowercase (_A ): """simple docstring""" return next(parameter.parameters() ).device def lowercase (_A , _A , _A , _A , _A , _A , _A ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(_A , _A , 0 , dtype=_A , value=_A ) _lowerCAmelCase : Tuple = param_name _lowerCAmelCase : Optional[Any] = model if "." in tensor_name: _lowerCAmelCase : List[str] = tensor_name.split('.' ) for split in splits[:-1]: _lowerCAmelCase : str = getattr(_A , _A ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) _lowerCAmelCase : Optional[int] = new_module _lowerCAmelCase : Tuple = splits[-1] # offload weights _lowerCAmelCase : Optional[Any] = False offload_weight(module._parameters[tensor_name] , _A , _A , index=_A ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _A , index=_A , ) else: offload_weight(_A , _A , _A , index=_A ) offload_weight(_A , param_name.replace('weight' , 'SCB' ) , _A , index=_A ) set_module_tensor_to_device(_A , _A , 'meta' , dtype=_A , value=torch.empty(*param.size() ) )
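# Stripped of the bitsandbytes specifics, the replacement routine above is a
# generic recursive module swap: walk named_children, replace matching leaf
# Linears, recurse elsewhere, and report whether anything changed. A
# framework-only sketch (a float16 Linear stands in for the quantized layer):
import torch
import torch.nn as nn


def replace_linears(model: nn.Module, skip=("lm_head",)) -> bool:
    replaced = False
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new = nn.Linear(module.in_features, module.out_features,
                            bias=module.bias is not None, dtype=torch.float16)
            new.weight.data = module.weight.data.half()  # reuse the original weights
            if module.bias is not None:
                new.bias.data = module.bias.data.half()
            setattr(model, name, new)
            replaced = True
        elif len(list(module.children())) > 0:
            replaced = replace_linears(module, skip) or replaced
    return replaced


net = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
assert replace_linears(net) and net[0].weight.dtype == torch.float16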
25
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "nat" __magic_name__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Tuple = embed_dim _lowerCAmelCase : Any = depths _lowerCAmelCase : Dict = len(snake_case__ ) _lowerCAmelCase : str = num_heads _lowerCAmelCase : Dict = kernel_size _lowerCAmelCase : Union[str, Any] = mlp_ratio _lowerCAmelCase : int = qkv_bias _lowerCAmelCase : Optional[Any] = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : List[str] = drop_path_rate _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Tuple = layer_norm_eps _lowerCAmelCase : Dict = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) _lowerCAmelCase : Any = layer_scale_init_value _lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] _lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
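# The hidden_size computed at the end of __init__ above follows the usual
# hierarchical-backbone rule: channel width doubles at each stage, so the
# final width is embed_dim * 2 ** (num_stages - 1). With the defaults shown:
embed_dim = 64
depths = [3, 4, 6, 5]  # four stages
stage_widths = [embed_dim * 2**i for i in range(len(depths))]
print(stage_widths)                             # [64, 128, 256, 512]
print(int(embed_dim * 2 ** (len(depths) - 1)))  # 512, the reported hidden_size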
25
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase : Any = { """MIT/ast-finetuned-audioset-10-10-0.4593""": ( """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json""" ), } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "audio-spectrogram-transformer" def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=16 , snake_case__=True , snake_case__=10 , snake_case__=10 , snake_case__=1024 , snake_case__=128 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = hidden_size _lowerCAmelCase : Optional[Any] = num_hidden_layers _lowerCAmelCase : Optional[int] = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : str = hidden_dropout_prob _lowerCAmelCase : Dict = attention_probs_dropout_prob _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : Union[str, Any] = layer_norm_eps _lowerCAmelCase : Any = patch_size _lowerCAmelCase : Dict = qkv_bias _lowerCAmelCase : Any = frequency_stride _lowerCAmelCase : List[Any] = time_stride _lowerCAmelCase : Any = max_length _lowerCAmelCase : Optional[Any] = num_mel_bins
25
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : str = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """roberta-base""": 5_12, """roberta-large""": 5_12, """roberta-large-mnli""": 5_12, """distilroberta-base""": 5_12, """roberta-base-openai-detector""": 5_12, """roberta-large-openai-detector""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = RobertaTokenizer def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , 
sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) ) _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = add_prefix_space _lowerCAmelCase : Union[str, Any] = 'post_processor' _lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Any = tuple(state['sep'] ) if "cls" in state: _lowerCAmelCase : str = tuple(state['cls'] ) _lowerCAmelCase : List[str] = False if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space: _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : Tuple = True if state.get('trim_offsets' , snake_case__ ) != trim_offsets: _lowerCAmelCase : Union[str, Any] = trim_offsets _lowerCAmelCase : Optional[int] = True if changes_to_apply: _lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) ) _lowerCAmelCase : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property def a ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value _lowerCAmelCase : Tuple = value def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
1
'''simple docstring''' from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id', ['canonical_dataset_name', 'org-name/dataset-name'])
@pytest.mark.parametrize('path', ['filename.csv', 'filename with blanks.csv'])
@pytest.mark.parametrize('revision', [None, 'v2'])
def test_hf_hub_url(repo_id, path, revision):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
25
'''simple docstring''' red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Move reds to the front, then advance both pointers.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Move blues to the back; the swapped-in element still needs checking.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f'{dutch_national_flag_sort(unsorted)}')
25
1
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase : str = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowerCAmelCase : Tuple = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } lowerCAmelCase : Dict = {"""facebook/blenderbot-3B""": 1_28} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowercase (): """simple docstring""" _lowerCAmelCase : Union[str, Any] = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _lowerCAmelCase : Union[str, Any] = bs[:] _lowerCAmelCase : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 _lowerCAmelCase : str = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = set() _lowerCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowerCAmelCase : Union[str, Any] = char return pairs class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__ , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token _lowerCAmelCase : List[str] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token _lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token _lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token _lowerCAmelCase : Any = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token super().__init__( errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , ) with open(snake_case__ , encoding='utf-8' ) as vocab_handle: _lowerCAmelCase : str = json.load(snake_case__ ) _lowerCAmelCase : List[Any] = {v: k for k, v in self.encoder.items()} _lowerCAmelCase : Dict = errors # how to handle errors in decoding _lowerCAmelCase : int = bytes_to_unicode() _lowerCAmelCase : Tuple = {v: k for k, v in self.byte_encoder.items()} with open(snake_case__ , encoding='utf-8' ) as merges_handle: _lowerCAmelCase : str = merges_handle.read().split('\n' )[1:-1] _lowerCAmelCase : str = [tuple(merge.split() ) for merge in bpe_merges] _lowerCAmelCase : Tuple = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : List[str] = {} _lowerCAmelCase : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _lowerCAmelCase : Optional[int] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def a ( self ): '''simple docstring''' return len(self.encoder ) def a ( self ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def a ( self , snake_case__ ): '''simple docstring''' if token in self.cache: return self.cache[token] _lowerCAmelCase : Optional[Any] = tuple(snake_case__ ) _lowerCAmelCase : Tuple = get_pairs(snake_case__ ) if not pairs: return token while True: _lowerCAmelCase : List[str] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = bigram _lowerCAmelCase : Tuple = [] _lowerCAmelCase : str = 0 while i < len(snake_case__ ): try: _lowerCAmelCase : List[Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _lowerCAmelCase : str = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowerCAmelCase : List[str] = tuple(snake_case__ ) _lowerCAmelCase : int = new_word if len(snake_case__ ) == 1: break else: _lowerCAmelCase : Tuple = get_pairs(snake_case__ ) _lowerCAmelCase : Union[str, Any] = ' '.join(snake_case__ ) _lowerCAmelCase : Union[str, Any] = word return word def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [] for token in re.findall(self.pat , snake_case__ ): _lowerCAmelCase : Optional[int] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case__ ).split(' ' ) ) return bpe_tokens def a ( self , snake_case__ ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def a ( self , snake_case__ ): '''simple docstring''' return self.decoder.get(snake_case__ 
) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = ''.join(snake_case__ ) _lowerCAmelCase : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : Any = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : str = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '\n' ) _lowerCAmelCase : int = 0 with open(snake_case__ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!' ) _lowerCAmelCase : Tuple = token_index writer.write(' '.join(snake_case__ ) + '\n' ) index += 1 return vocab_file, merge_file def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : List[str] = [self.sep_token_id] _lowerCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__=False , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(snake_case__ ) > 0 and not text[0].isspace()): _lowerCAmelCase : Tuple = ' ' + text return (text, kwargs) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(snake_case__ ) _lowerCAmelCase : List[Any] = ' '.join(snake_case__ ) _lowerCAmelCase : str = self.encode(snake_case__ ) if len(snake_case__ ) > self.model_max_length: _lowerCAmelCase : Any = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
25
'''simple docstring''' def solution():
    """simple docstring"""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
25
1
'''simple docstring''' from __future__ import annotations


def prime_sieve(limit):
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling=1_000_000):
    """simple docstring"""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f'{solution() = }')
25
'''simple docstring''' def solution(limit=1_000_000):
    """simple docstring"""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f'{solution() = }')
25
1
'''simple docstring''' from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude, angle, radian_mode=False):
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """simple docstring"""
    # The net moment is the sum of the cross products of position and force.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
25
'''simple docstring''' import argparse import os import re lowerCAmelCase : Tuple = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : str = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""") def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = _re_indent.search(_A ) return "" if search is None else search.groups()[0] def lowercase (_A , _A="" , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : int = 0 _lowerCAmelCase : Dict = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_A ): index += 1 _lowerCAmelCase : Dict = ['\n'.join(lines[:index] )] else: _lowerCAmelCase : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCAmelCase : List[Any] = [lines[index]] index += 1 while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_A ) ) if index < len(_A ) - 1: _lowerCAmelCase : Union[str, Any] = [lines[index + 1]] index += 1 else: _lowerCAmelCase : Union[str, Any] = [] else: blocks.append('\n'.join(_A ) ) _lowerCAmelCase : List[str] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_A ) > 0: blocks.append('\n'.join(_A ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_A ): blocks.append('\n'.join(lines[index:] ) ) return blocks def lowercase (_A ): """simple docstring""" def _inner(_A ): return key(_A ).lower().replace('_' , '' ) return _inner def lowercase (_A , _A=None ): """simple docstring""" def noop(_A ): return x if key is None: _lowerCAmelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()] # Functions begin with a lowercase, they go last. _lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()] _lowerCAmelCase : Dict = ignore_underscore(_A ) return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A ) def lowercase (_A ): """simple docstring""" def _replace(_A ): _lowerCAmelCase : Dict = match.groups()[0] if "," not in imports: return f'[{imports}]' _lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCAmelCase : int = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]" _lowerCAmelCase : Tuple = import_statement.split('\n' ) if len(_A ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1 _lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] ) _lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_A ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCAmelCase : List[str] = keys[:-1] _lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] ) return "\n".join(_A ) else: # Finally we have to deal with imports fitting on one line _lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A ) return import_statement def lowercase (_A , _A=True ): """simple docstring""" with open(_A , encoding='utf-8' ) as f: _lowerCAmelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCAmelCase : Tuple = split_code_in_indented_blocks( _A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_A ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCAmelCase : Tuple = main_blocks[block_idx] _lowerCAmelCase : int = block.split('\n' ) # Get to the start of the imports. _lowerCAmelCase : Tuple = 0 while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCAmelCase : Dict = len(_A ) else: line_idx += 1 if line_idx >= len(_A ): continue # Ignore beginning and last line: they don't contain anything. _lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] ) _lowerCAmelCase : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None] _lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_lowerCAmelCase : int = 0 _lowerCAmelCase : Optional[Any] = [] for i in range(len(_A ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_A ) count += 1 # And we put our main block back together with its first and last line. _lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_A ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_A ) ) def lowercase (_A=True ): """simple docstring""" _lowerCAmelCase : int = [] for root, _, files in os.walk(_A ): if "__init__.py" in files: _lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A ) if result: _lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )] if len(_A ) > 0: raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
25
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : List[str] = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""): raise Exception("""requires fairseq >= 1.0.0a""") logging.set_verbosity_info() lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Tuple = """Hello world! cécé herlolip""" def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase : Any = FairseqRobertaModel.from_pretrained(_A ) roberta.eval() # disable dropout _lowerCAmelCase : Tuple = roberta.model.encoder.sentence_encoder _lowerCAmelCase : List[Any] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: _lowerCAmelCase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , _A ) _lowerCAmelCase : List[str] = XLMRobertaXLForSequenceClassification(_A ) if classification_head else XLMRobertaXLForMaskedLM(_A ) model.eval() # Now let's copy all the weights. # Embeddings _lowerCAmelCase : Optional[int] = roberta_sent_encoder.embed_tokens.weight _lowerCAmelCase : Optional[Any] = roberta_sent_encoder.embed_positions.weight _lowerCAmelCase : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_lowerCAmelCase : Dict = roberta_sent_encoder.layer_norm.weight _lowerCAmelCase : List[Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i] _lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] _lowerCAmelCase : RobertaAttention = layer.attention _lowerCAmelCase : int = roberta_layer.self_attn_layer_norm.weight _lowerCAmelCase : str = roberta_layer.self_attn_layer_norm.bias # self attention _lowerCAmelCase : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.q_proj.weight _lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.q_proj.bias _lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.k_proj.weight _lowerCAmelCase : Dict = roberta_layer.self_attn.k_proj.bias _lowerCAmelCase : List[Any] = roberta_layer.self_attn.v_proj.weight _lowerCAmelCase : Optional[int] = roberta_layer.self_attn.v_proj.bias # self-attention output _lowerCAmelCase : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _lowerCAmelCase : Tuple = roberta_layer.self_attn.out_proj.weight _lowerCAmelCase : Dict = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _lowerCAmelCase : Dict = roberta_layer.final_layer_norm.weight _lowerCAmelCase : Union[str, Any] = roberta_layer.final_layer_norm.bias # intermediate _lowerCAmelCase : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase : List[Any] = roberta_layer.fca.weight _lowerCAmelCase : int = roberta_layer.fca.bias # output _lowerCAmelCase : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _lowerCAmelCase : Tuple = roberta_layer.fca.weight _lowerCAmelCase : Tuple = roberta_layer.fca.bias # end of layer if classification_head: _lowerCAmelCase : str = roberta.model.classification_heads['mnli'].dense.weight _lowerCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].dense.bias _lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['mnli'].out_proj.weight _lowerCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head _lowerCAmelCase : List[Any] = roberta.model.encoder.lm_head.dense.weight _lowerCAmelCase : Dict = roberta.model.encoder.lm_head.dense.bias _lowerCAmelCase : Any = roberta.model.encoder.lm_head.layer_norm.weight _lowerCAmelCase : Dict = roberta.model.encoder.lm_head.layer_norm.bias _lowerCAmelCase : int = roberta.model.encoder.lm_head.weight _lowerCAmelCase : List[Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_lowerCAmelCase : torch.Tensor = roberta.encode(_A ).unsqueeze(0 ) # batch of size 1 _lowerCAmelCase : int = model(_A )[0] if classification_head: _lowerCAmelCase : List[Any] = roberta.model.classification_heads['mnli'](roberta.extract_features(_A ) ) else: _lowerCAmelCase : Dict = roberta.model(_A )[0] print(our_output.shape , their_output.shape ) _lowerCAmelCase : List[str] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 _lowerCAmelCase : List[Any] = torch.allclose(_A , _A , atol=1E-3 ) print('Do both models output the same tensors?' , '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(_A ).mkdir(parents=_A , exist_ok=_A ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(_A ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) lowerCAmelCase : Optional[Any] = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
25
'''simple docstring''' from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation):
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == '+':
                stack.append(a + b)
            elif token == '-':
                stack.append(a - b)
            elif token == '*':
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
1
'''simple docstring''' from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """simple docstring"""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its smallest factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """simple docstring"""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
25
'''simple docstring''' from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """simple docstring"""

    model_type = 'mobilenet_v2'

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act='relu6',
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == 'image-classification':
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
25
1
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class UpperCamelCase__ : """simple docstring""" def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : str = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) _lowerCAmelCase : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _lowerCAmelCase : Any = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) _lowerCAmelCase : int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : str = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) _lowerCAmelCase : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' ) torch.manual_seed(0 ) _lowerCAmelCase : int = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0 ) _lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) _lowerCAmelCase : 
Optional[int] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.get_dummy_components() _lowerCAmelCase : List[Any] = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : Union[str, Any] = inputs['prompt'] _lowerCAmelCase : List[str] = inputs['generator'] _lowerCAmelCase : Any = inputs['num_inference_steps'] _lowerCAmelCase : Union[str, Any] = inputs['output_type'] if "image" in inputs: _lowerCAmelCase : List[Any] = inputs['image'] else: _lowerCAmelCase : List[Any] = None if "mask_image" in inputs: _lowerCAmelCase : Optional[int] = inputs['mask_image'] else: _lowerCAmelCase : str = None if "original_image" in inputs: _lowerCAmelCase : Optional[Any] = inputs['original_image'] else: _lowerCAmelCase : str = None _lowerCAmelCase , _lowerCAmelCase : str = pipe.encode_prompt(snake_case__ ) # inputs with prompt converted to embeddings _lowerCAmelCase : List[str] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: _lowerCAmelCase : int = image if mask_image is not None: _lowerCAmelCase : Dict = mask_image if original_image is not None: _lowerCAmelCase : Any = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase : str = pipe(**snake_case__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(snake_case__ ) _lowerCAmelCase : Union[str, Any] = self.pipeline_class.from_pretrained(snake_case__ ) pipe_loaded.to(snake_case__ ) pipe_loaded.set_progress_bar_config(disable=snake_case__ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(snake_case__ , snake_case__ ) is None , F'`{optional_component}` did not stay set to None after loading.' 
, ) _lowerCAmelCase : int = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : Any = inputs['generator'] _lowerCAmelCase : Union[str, Any] = inputs['num_inference_steps'] _lowerCAmelCase : Optional[int] = inputs['output_type'] # inputs with prompt converted to embeddings _lowerCAmelCase : Optional[int] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: _lowerCAmelCase : str = image if mask_image is not None: _lowerCAmelCase : Dict = mask_image if original_image is not None: _lowerCAmelCase : Optional[Any] = original_image _lowerCAmelCase : Optional[Any] = pipe_loaded(**snake_case__ )[0] _lowerCAmelCase : Dict = np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max() self.assertLess(snake_case__ , 1E-4 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.get_dummy_components() _lowerCAmelCase : Tuple = self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Dict = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : List[str] = pipe(**snake_case__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(snake_case__ ) _lowerCAmelCase : Any = self.pipeline_class.from_pretrained(snake_case__ ) pipe_loaded.to(snake_case__ ) pipe_loaded.set_progress_bar_config(disable=snake_case__ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : List[Any] = pipe_loaded(**snake_case__ )[0] _lowerCAmelCase : Union[str, Any] = np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max() self.assertLess(snake_case__ , 1E-4 )
25
'''simple docstring'''

from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
        _lowerCAmelCase : Optional[int] = 'pt'
        _lowerCAmelCase : Tuple = 'tf'

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(snake_case__ )

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
        model_tf.save_pretrained(snake_case__ )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = 'mock_framework'

        # Framework provided - return whatever the user provides
        _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
        self.assertEqual(snake_case__ , snake_case__ )

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(snake_case__ )
            _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
        self.assertEqual(snake_case__ , snake_case__ )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(snake_case__ )
            _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
        self.assertEqual(snake_case__ , snake_case__ )

    def a ( self ):
        '''simple docstring'''
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(snake_case__ )
            _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
        self.assertEqual(snake_case__ , self.framework_pt )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(snake_case__ )
            _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
        self.assertEqual(snake_case__ , self.framework_tf )

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(snake_case__ ):
                _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
        with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
            _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(snake_case__ , self.framework_pt )

        # PyTorch not in environment -> use TensorFlow
        _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
        with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
            _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(snake_case__ , self.framework_tf )

        # Both in environment -> use PyTorch
        _lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
        _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
        with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
            'transformers.onnx.features.is_torch_available' , snake_case__ ):
            _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(snake_case__ , self.framework_pt )

        # Both not in environment -> raise error
        _lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
        _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
        with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
            'transformers.onnx.features.is_torch_available' , snake_case__ ):
            with self.assertRaises(snake_case__ ):
                _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
1
'''simple docstring'''

import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


lowerCAmelCase : Dict = logging.get_logger(__name__)


def lowercase ():
    """simple docstring"""
    _lowerCAmelCase : str = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        _lowerCAmelCase : Any = json.loads(_A )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    _lowerCAmelCase : Any = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        _lowerCAmelCase : Tuple = json.loads(_A )
        if not mpi_options.get('sagemaker_mpi_enabled' , _A ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed' ) is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = field(
        default="" ,
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} ,
    )

    def a ( self ):
        '''simple docstring'''
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.' ,
            snake_case__ ,
        )

    @cached_property
    def a ( self ):
        '''simple docstring'''
        logger.info('PyTorch: setting up devices' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
        if self.no_cuda:
            _lowerCAmelCase : Any = torch.device('cpu' )
            _lowerCAmelCase : List[str] = 0
        elif is_sagemaker_model_parallel_available():
            _lowerCAmelCase : Tuple = smp.local_rank()
            _lowerCAmelCase : Dict = torch.device('cuda' , snake_case__ )
            _lowerCAmelCase : List[str] = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            _lowerCAmelCase : str = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            _lowerCAmelCase : Tuple = torch.device('cuda' , self.local_rank )
            _lowerCAmelCase : Any = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            _lowerCAmelCase : List[str] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            _lowerCAmelCase : List[str] = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            _lowerCAmelCase : str = torch.device('cuda' , self.local_rank )
            _lowerCAmelCase : Any = 1

        if device.type == "cuda":
            torch.cuda.set_device(snake_case__ )

        return device

    @property
    def a ( self ):
        '''simple docstring'''
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def a ( self ):
        '''simple docstring'''
        return not is_sagemaker_model_parallel_available()

    @property
    def a ( self ):
        '''simple docstring'''
        return False
25
'''simple docstring'''

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    lowerCAmelCase : Optional[int] = None

lowerCAmelCase : List[Any] = logging.get_logger(__name__)

lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

lowerCAmelCase : Any = {
    """vocab_file""": {
        """facebook/nllb-200-distilled-600M""": (
            """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """facebook/nllb-200-distilled-600M""": (
            """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
        ),
    },
}

lowerCAmelCase : List[str] = {
    """facebook/nllb-large-en-ro""": 10_24,
    """facebook/nllb-200-distilled-600M""": 10_24,
}

# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
# fmt: on


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = ["input_ids", "attention_mask"]
    __magic_name__ = NllbTokenizer

    __magic_name__ = []
    __magic_name__ = []

    def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
        _lowerCAmelCase : Dict = legacy_behaviour
        super().__init__(
            vocab_file=snake_case__ ,
            tokenizer_file=snake_case__ ,
            bos_token=snake_case__ ,
            eos_token=snake_case__ ,
            sep_token=snake_case__ ,
            cls_token=snake_case__ ,
            unk_token=snake_case__ ,
            pad_token=snake_case__ ,
            mask_token=snake_case__ ,
            src_lang=snake_case__ ,
            tgt_lang=snake_case__ ,
            additional_special_tokens=snake_case__ ,
            legacy_behaviour=snake_case__ ,
            **snake_case__ ,
        )

        _lowerCAmelCase : List[str] = vocab_file
        _lowerCAmelCase : int = False if not self.vocab_file else True

        _lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        _lowerCAmelCase : Any = {
            lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        _lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
        _lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
        _lowerCAmelCase : Tuple = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def a ( self ):
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Dict = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def a ( self , snake_case__ , snake_case__ = None ):
        '''simple docstring'''
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def a ( self , snake_case__ , snake_case__ = None ):
        '''simple docstring'''
        _lowerCAmelCase : str = [self.sep_token_id]
        _lowerCAmelCase : Optional[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        _lowerCAmelCase : Optional[Any] = src_lang
        _lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
        _lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
        _lowerCAmelCase : Optional[Any] = tgt_lang_id
        return inputs

    def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = src_lang
        _lowerCAmelCase : Optional[int] = tgt_lang
        return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )

    def a ( self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )

    def a ( self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )

        if self.legacy_behaviour:
            _lowerCAmelCase : Dict = []
            _lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
        else:
            _lowerCAmelCase : int = [self.cur_lang_code]
            _lowerCAmelCase : int = [self.eos_token_id]

        _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        _lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )

        _lowerCAmelCase : Any = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) ,
        )

    def a ( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
        if self.legacy_behaviour:
            _lowerCAmelCase : int = []
            _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
        else:
            _lowerCAmelCase : int = [self.cur_lang_code]
            _lowerCAmelCase : List[str] = [self.eos_token_id]

        _lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        _lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )

        _lowerCAmelCase : str = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) ,
        )

    def a ( self , snake_case__ , snake_case__ = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )

        if not os.path.isdir(snake_case__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        _lowerCAmelCase : Union[str, Any] = os.path.join(
            snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
            copyfile(self.vocab_file , snake_case__ )

        return (out_vocab_file,)
25
1
'''simple docstring'''

import operator


def lowercase (_A , _A = False , _A = None ):
    """simple docstring"""
    _lowerCAmelCase : Dict = operator.lt if reverse else operator.gt
    _lowerCAmelCase : Optional[int] = solution or []

    if not arr:
        return solution

    _lowerCAmelCase : Any = [arr.pop(0 )]
    for i, item in enumerate(_A ):
        if _operator(_A , sublist[-1] ):
            sublist.append(_A )
            arr.pop(_A )

    # merging sublist into solution list
    if not solution:
        solution.extend(_A )
    else:
        while sublist:
            _lowerCAmelCase : List[Any] = sublist.pop(0 )
            for i, xx in enumerate(_A ):
                if not _operator(_A , _A ):
                    solution.insert(_A , _A )
                    break
            else:
                solution.append(_A )

    strand_sort(_A , _A , _A )
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
25
'''simple docstring'''

import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
    """kernels/rwkv/wkv_cuda.cu""",
    """kernels/rwkv/wkv_op.cpp""",
    """kernels/deformable_detr/ms_deform_attn.h""",
    """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
    """models/graphormer/algos_graphormer.pyx""",
]


def lowercase (_A ):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowerCAmelCase : Dict = argparse.ArgumentParser()
    parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
    lowerCAmelCase : Dict = parser.parse_args()

    if args.check_lib:
        lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
        lowerCAmelCase : int = Path(transformers_module.__file__).parent
    else:
        lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""

    if not test_custom_files_are_present(transformers_path):
        raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
25
1
'''simple docstring'''

import os

lowerCAmelCase : Tuple = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00}


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : Tuple = 0
    _lowerCAmelCase : Dict = 0
    while index < len(_A ) - 1:
        _lowerCAmelCase : List[str] = SYMBOLS[numerals[index]]
        _lowerCAmelCase : str = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : List[Any] = ''
    _lowerCAmelCase : Optional[Any] = num // 1_0_0_0
    numerals += m_count * "M"
    num %= 1_0_0_0

    _lowerCAmelCase : Dict = num // 1_0_0
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_0_0

    _lowerCAmelCase : Union[str, Any] = num // 1_0
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 1_0

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def lowercase (_A = "/p089_roman.txt" ):
    """simple docstring"""
    _lowerCAmelCase : Optional[int] = 0

    with open(os.path.dirname(_A ) + roman_numerals_filename ) as filea:
        _lowerCAmelCase : Optional[int] = filea.readlines()

    for line in lines:
        _lowerCAmelCase : Any = line.strip()
        _lowerCAmelCase : str = parse_roman_numerals(_A )
        _lowerCAmelCase : Tuple = generate_roman_numerals(_A )
        savings += len(_A ) - len(_A )

    return savings


if __name__ == "__main__":
    print(F'''{solution() = }''')
25
'''simple docstring'''

def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : Union[str, Any] = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    _lowerCAmelCase : List[str] = ''
    _lowerCAmelCase : Any = ''

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(_A ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    _lowerCAmelCase , _lowerCAmelCase : Optional[int] = 0, 0

    # length[i] shows the length of palindromic substring with center i
    _lowerCAmelCase : List[str] = [1 for i in range(len(_A ) )]

    # for each character in new_string find corresponding palindromic string
    _lowerCAmelCase : Any = 0
    for j in range(len(_A ) ):
        _lowerCAmelCase : Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(_A )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        _lowerCAmelCase : List[str] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes then update r to the last index of this string
        if j + k - 1 > r:
            _lowerCAmelCase : Optional[Any] = j - k + 1  # noqa: E741
            _lowerCAmelCase : int = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            _lowerCAmelCase : Dict = length[j]
            _lowerCAmelCase : Optional[int] = j

    # create that string
    _lowerCAmelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
1
'''simple docstring'''

import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    def __init__( self , snake_case__ , snake_case__=None , snake_case__=True , snake_case__=None , **snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = parent
        _lowerCAmelCase : Union[str, Any] = config_class
        _lowerCAmelCase : int = has_text_modality
        _lowerCAmelCase : Optional[Any] = kwargs
        _lowerCAmelCase : Optional[Any] = common_properties

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = self.config_class(**self.inputs_dict )
        _lowerCAmelCase : Dict = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'] )

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=F'`{prop}` does not exist' )

        # Test that config has the common properties as setter
        for idx, name in enumerate(snake_case__ ):
            try:
                setattr(snake_case__ , snake_case__ , snake_case__ )
                self.parent.assertEqual(
                    getattr(snake_case__ , snake_case__ ) , snake_case__ ,
                    msg=F'`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(snake_case__ ):
            try:
                _lowerCAmelCase : Dict = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(snake_case__ , snake_case__ ) , snake_case__ ,
                    msg=F'`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : str = self.config_class(**self.inputs_dict )
        _lowerCAmelCase : Optional[int] = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , snake_case__ )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : List[str] = os.path.join(snake_case__ , 'config.json' )
            config_first.to_json_file(snake_case__ )
            _lowerCAmelCase : Optional[int] = self.config_class.from_json_file(snake_case__ )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(snake_case__ )
            _lowerCAmelCase : int = self.config_class.from_pretrained(snake_case__ )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = self.config_class(**self.inputs_dict )
        _lowerCAmelCase : List[str] = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : List[str] = os.path.join(snake_case__ , snake_case__ )
            config_first.save_pretrained(snake_case__ )
            _lowerCAmelCase : str = self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : str = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )

        _lowerCAmelCase : int = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def a ( self ):
        '''simple docstring'''
        if self.config_class.is_composition:
            return
        _lowerCAmelCase : Any = self.config_class()
        self.parent.assertIsNotNone(snake_case__ )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = copy.deepcopy(snake_case__ )
        _lowerCAmelCase : List[str] = self.config_class(**snake_case__ )
        _lowerCAmelCase : List[Any] = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
            elif getattr(snake_case__ , snake_case__ ) != value:
                wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )

        if len(snake_case__ ) > 0:
            _lowerCAmelCase : List[Any] = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )
            raise ValueError(F'The following keys were not properly set in the config:\n{errors}' )

    def a ( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
25
'''simple docstring'''

import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = 0
    __magic_name__ = False
    __magic_name__ = 3.0


class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    def a ( self ):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )

    @require_cuda
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        _lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        _lowerCAmelCase : str = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , snake_case__ )

    @require_multi_gpu
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        execute_subprocess_async(snake_case__ , env=os.environ.copy() )


if __name__ == "__main__":
    lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
    lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
    lowerCAmelCase : List[str] = accelerator.prepare(model)

    # Check the values changed in kwargs
    lowerCAmelCase : List[Any] = """"""
    lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
25
1
'''simple docstring'''

import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def lowercase (_A ):
    """simple docstring"""
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )


def lowercase (_A , _A ):
    """simple docstring"""
    _lowerCAmelCase : List[str] = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        _lowerCAmelCase : int = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
        _lowerCAmelCase : str = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
        _lowerCAmelCase : Optional[Any] = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
        _lowerCAmelCase : str = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
        _lowerCAmelCase : List[str] = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
        _lowerCAmelCase : int = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
        _lowerCAmelCase : str = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
        _lowerCAmelCase : Dict = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
        _lowerCAmelCase : int = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
        _lowerCAmelCase : Optional[Any] = key.replace('image_encoder.module' , 'flava.image_model' )
        _lowerCAmelCase : Tuple = key.replace('text_encoder.module' , 'flava.text_model' )
        _lowerCAmelCase : int = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
        _lowerCAmelCase : int = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
        _lowerCAmelCase : Optional[int] = key.replace('text_projection' , 'flava.text_projection' )
        _lowerCAmelCase : Dict = key.replace('image_projection' , 'flava.image_projection' )

        _lowerCAmelCase : Tuple = value.float()

    for key, value in codebook_state_dict.items():
        _lowerCAmelCase : str = value

    return upgrade


@torch.no_grad()
def lowercase (_A , _A , _A , _A=None ):
    """simple docstring"""
    if config_path is not None:
        _lowerCAmelCase : Optional[Any] = FlavaConfig.from_pretrained(_A )
    else:
        _lowerCAmelCase : Optional[int] = FlavaConfig()

    _lowerCAmelCase : Optional[int] = FlavaForPreTraining(_A ).eval()

    _lowerCAmelCase : str = convert_dalle_checkpoint(_A , _A , save_checkpoint=_A )

    if os.path.exists(_A ):
        _lowerCAmelCase : Optional[Any] = torch.load(_A , map_location='cpu' )
    else:
        _lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='cpu' )

    _lowerCAmelCase : List[Any] = upgrade_state_dict(_A , _A )
    hf_model.load_state_dict(_A )
    _lowerCAmelCase : str = hf_model.state_dict()
    _lowerCAmelCase : str = count_parameters(_A )
    _lowerCAmelCase : Any = count_parameters(_A ) + count_parameters(_A )

    assert torch.allclose(_A , _A , atol=1E-3 )

    hf_model.save_pretrained(_A )


if __name__ == "__main__":
    lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
    parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    lowerCAmelCase : Any = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
25
'''simple docstring'''

from ....configuration_utils import PretrainedConfig
from ....utils import logging


lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

lowerCAmelCase : Optional[Any] = {
    """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
        """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """simple docstring"""

    __magic_name__ = "trajectory_transformer"
    __magic_name__ = ["past_key_values"]
    __magic_name__ = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = vocab_size
        _lowerCAmelCase : Any = action_weight
        _lowerCAmelCase : Optional[int] = reward_weight
        _lowerCAmelCase : Union[str, Any] = value_weight
        _lowerCAmelCase : List[str] = max_position_embeddings
        _lowerCAmelCase : Tuple = block_size
        _lowerCAmelCase : List[Any] = action_dim
        _lowerCAmelCase : List[Any] = observation_dim
        _lowerCAmelCase : Union[str, Any] = transition_dim
        _lowerCAmelCase : Tuple = learning_rate
        _lowerCAmelCase : int = n_layer
        _lowerCAmelCase : Any = n_head
        _lowerCAmelCase : Tuple = n_embd
        _lowerCAmelCase : Optional[Any] = embd_pdrop
        _lowerCAmelCase : Union[str, Any] = attn_pdrop
        _lowerCAmelCase : Any = resid_pdrop
        _lowerCAmelCase : Optional[Any] = initializer_range
        _lowerCAmelCase : List[Any] = layer_norm_eps
        _lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
        _lowerCAmelCase : List[Any] = use_cache
        super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
25
1