Dataset schema (one row per example, with observed value ranges):

    column                    type     range
    code                      string   length 87 to 55.2k
    code_codestyle            int64    0 to 349
    style_context             string   length 135 to 49.1k
    style_context_codestyle   int64    0 to 349
    label                     int64    0 or 1
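The rows below follow this schema. As a quick orientation, here is a minimal sketch of loading and inspecting one row with the `datasets` library; the storage format and the file name `data.jsonl` are placeholders, not something this card specifies:

```python
from datasets import load_dataset

# Placeholder data file; substitute the real file(s) backing this dataset.
ds = load_dataset("json", data_files="data.jsonl", split="train")

row = ds[0]
print(len(row["code"]))                # string, length between 87 and 55.2k characters
print(row["code_codestyle"])           # int64 style-class id in [0, 349]
print(row["style_context_codestyle"])  # int64 style-class id in [0, 349]
print(row["label"])                    # int64 binary label, 0 or 1
```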
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
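For context, the reader above wraps the packaged `text` builder, which is also what backs plain-text loading through the public API; a minimal sketch of that route (the file name is a placeholder):

```python
from datasets import load_dataset

# Placeholder file name; any newline-delimited text file works.
dataset = load_dataset("text", data_files="my_corpus.txt", split="train")
print(dataset[0]["text"])  # each line of the file becomes one example
```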
code_codestyle: 169
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
style_context_codestyle: 322
label: 0
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def A__ ( __lowerCamelCase ): if isinstance(__lowerCamelCase, collections.abc.Iterable ): return x return (x, x) @require_flax class UpperCamelCase__ : """simple docstring""" def _UpperCamelCase ( self , _A , _A ) -> Optional[int]: pass def _UpperCamelCase ( self ) -> str: pass def _UpperCamelCase ( self ) -> Tuple: pass def _UpperCamelCase ( self , _A , _A , _A ) -> Any: SCREAMING_SNAKE_CASE_ = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> List[Any]: SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A ) SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> List[Any]: SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A ) SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model} SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> Dict: SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A ) SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model} SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) SCREAMING_SNAKE_CASE_ = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A ) SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) SCREAMING_SNAKE_CASE_ = after_output[0] SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-3 ) def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> Tuple: SCREAMING_SNAKE_CASE_ 
= self.get_vision_text_model(_A , _A ) SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model} SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) SCREAMING_SNAKE_CASE_ = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) SCREAMING_SNAKE_CASE_ = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.image_size ) SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.patch_size ) SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE_ = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) SCREAMING_SNAKE_CASE_ = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _UpperCamelCase ( self , _A , _A , _A ) -> Dict: pt_model.to(_A ) pt_model.eval() # prepare inputs SCREAMING_SNAKE_CASE_ = inputs_dict SCREAMING_SNAKE_CASE_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): SCREAMING_SNAKE_CASE_ = pt_model(**_A ).to_tuple() SCREAMING_SNAKE_CASE_ = fx_model(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(_A , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_A ) SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A ) SCREAMING_SNAKE_CASE_ = fx_model_loaded(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(_A , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_A ) SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A ) pt_model_loaded.to(_A ) pt_model_loaded.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = pt_model_loaded(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4E-2 ) def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_A ) SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A ) SCREAMING_SNAKE_CASE_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A ) SCREAMING_SNAKE_CASE_ = fx_state self.check_pt_flax_equivalence(_A , _A , _A ) def _UpperCamelCase ( self , _A , _A , _A ) -> Dict: SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_A ) SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A ) SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_A , fx_model.params ) self.check_pt_flax_equivalence(_A , _A , _A ) 
def _UpperCamelCase ( self ) -> Dict: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def _UpperCamelCase ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() self.check_save_load(**_A ) def _UpperCamelCase ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @is_pt_flax_cross_test def _UpperCamelCase ( self ) -> Any: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''vision_config''' ) SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''text_config''' ) SCREAMING_SNAKE_CASE_ = config_inputs_dict self.check_equivalence_pt_to_flax(_A , _A , _A ) self.check_equivalence_flax_to_pt(_A , _A , _A ) @slow def _UpperCamelCase ( self ) -> Tuple: SCREAMING_SNAKE_CASE_ = self.get_pretrained_model_and_inputs() SCREAMING_SNAKE_CASE_ = model_a(**_A ) SCREAMING_SNAKE_CASE_ = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A ) SCREAMING_SNAKE_CASE_ = model_a(**_A ) SCREAMING_SNAKE_CASE_ = after_outputs[0] SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) @require_flax class UpperCamelCase__ ( snake_case__ , unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , ) SCREAMING_SNAKE_CASE_ = 13 SCREAMING_SNAKE_CASE_ = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] ) SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _UpperCamelCase ( self , _A , _A ) -> Tuple: SCREAMING_SNAKE_CASE_ = FlaxViTModel(_A ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(_A ) return vision_model, text_model def _UpperCamelCase ( self ) -> int: SCREAMING_SNAKE_CASE_ = FlaxViTModelTester(self ) SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self ) SCREAMING_SNAKE_CASE_ = vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = vision_config_and_inputs SCREAMING_SNAKE_CASE_ = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class UpperCamelCase__ ( snake_case__ , unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , ) SCREAMING_SNAKE_CASE_ = 13 SCREAMING_SNAKE_CASE_ = floats_tensor( [ batch_size, 
model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] ) SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _UpperCamelCase ( self , _A , _A ) -> Any: SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModel(_A ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(_A ) return vision_model, text_model def _UpperCamelCase ( self ) -> Any: SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModelTester(self ) SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self ) SCREAMING_SNAKE_CASE_ = clip_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = vision_config_and_inputs SCREAMING_SNAKE_CASE_ = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase ( self ) -> Tuple: SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE_ = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''' ) SCREAMING_SNAKE_CASE_ = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) SCREAMING_SNAKE_CASE_ = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1E-3 ) )
code_codestyle: 299
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]: super().__init__() __lowerCAmelCase: Optional[Any] = initial_learning_rate __lowerCAmelCase: str = warmup_steps __lowerCAmelCase: Optional[int] = power __lowerCAmelCase: str = decay_schedule_fn __lowerCAmelCase: Tuple = name def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[int]: with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCAmelCase: List[str] = tf.cast(UpperCAmelCase , tf.floataa ) __lowerCAmelCase: Tuple = tf.cast(self.warmup_steps , tf.floataa ) __lowerCAmelCase: List[str] = global_step_float / warmup_steps_float __lowerCAmelCase: List[str] = self.initial_learning_rate * tf.math.pow(UpperCAmelCase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCAmelCase , ) def UpperCAmelCase ( self : Tuple ) -> int: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 0.9 , SCREAMING_SNAKE_CASE : float = 0.9_9_9 , SCREAMING_SNAKE_CASE : float = 1E-8 , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase: Tuple = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=SCREAMING_SNAKE_CASE , ) if num_warmup_steps: __lowerCAmelCase: Optional[int] = WarmUp( initial_learning_rate=SCREAMING_SNAKE_CASE , decay_schedule_fn=SCREAMING_SNAKE_CASE , warmup_steps=SCREAMING_SNAKE_CASE , ) if weight_decay_rate > 0.0: __lowerCAmelCase: List[Any] = AdamWeightDecay( learning_rate=SCREAMING_SNAKE_CASE , weight_decay_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=SCREAMING_SNAKE_CASE , ) else: __lowerCAmelCase: Dict = tf.keras.optimizers.Adam( learning_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class A_ ( snake_case__ ): def __init__( self : Tuple , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1E-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : str , ) -> int: super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) __lowerCAmelCase: List[Any] = weight_decay_rate __lowerCAmelCase: List[str] = include_in_weight_decay __lowerCAmelCase: Optional[Any] = exclude_from_weight_decay @classmethod def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]: __lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp} return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase ) def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]: super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: Union[str, Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]: __lowerCAmelCase: Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: __lowerCAmelCase , __lowerCAmelCase: Tuple = list(zip(*UpperCAmelCase ) ) return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCAmelCase: Dict = apply_state or {} __lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: Tuple = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]: __lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase ) __lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) with tf.control_dependencies([decay] ): return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]: __lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase ) __lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase ) with tf.control_dependencies([decay] ): return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: __lowerCAmelCase: List[str] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(UpperCAmelCase , UpperCAmelCase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(UpperCAmelCase , UpperCAmelCase ) is not None: return False return True class A_ ( snake_case__ ): def __init__( self : int ) -> List[Any]: __lowerCAmelCase: Tuple = [] __lowerCAmelCase: int = None @property def UpperCAmelCase ( self : Dict ) -> List[Any]: if self._accum_steps is None: __lowerCAmelCase: List[Any] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def UpperCAmelCase ( self : Union[str, Any] ) -> int: if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any: if not self._gradients: __lowerCAmelCase: Any = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(UpperCAmelCase ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' ) for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(UpperCAmelCase ) self._accum_steps.assign_add(1 ) def UpperCAmelCase ( self : int ) -> int: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(UpperCAmelCase ) )
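The `WarmUp` schedule in the sample above scales the learning rate by `(step / warmup_steps) ** power` while warming up and hands off to a decay schedule afterwards. A minimal framework-free sketch of that rule (the names here are mine, not from the sample):

```python
def warmup_then_decay(step: int, init_lr: float, warmup_steps: int,
                      power: float, decay_fn) -> float:
    # Polynomial warmup: lr ramps from 0 to init_lr over warmup_steps.
    if step < warmup_steps:
        return init_lr * (step / warmup_steps) ** power
    # After warmup, the decay schedule sees steps counted from the end of warmup.
    return decay_fn(step - warmup_steps)


# Example: linear warmup (power=1.0) into a constant-lr "decay".
lr = warmup_then_decay(step=50, init_lr=1e-3, warmup_steps=100,
                       power=1.0, decay_fn=lambda s: 1e-3)
print(lr)  # 0.0005
```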
style_context_codestyle: 322
label: 0
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two columns of six memory cells with a label underneath.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single memory cell.
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        # Model: a row of six cells.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        # Shrink each model cell into the CPU block to show the empty skeleton landing in RAM.
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
code_codestyle: 34
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]=[] ) -> str: """simple docstring""" __lowerCAmelCase: Optional[int] = size[0] - overlap_pixels * 2 __lowerCAmelCase: str = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels __lowerCAmelCase: Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55 __lowerCAmelCase: int = np.pad(SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 ) if "l" in remove_borders: __lowerCAmelCase: Dict = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: __lowerCAmelCase: Tuple = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: __lowerCAmelCase: List[Any] = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: __lowerCAmelCase: List[str] = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]: """simple docstring""" return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] ) -> int: """simple docstring""" return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : [int] ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: Tuple = list(SCREAMING_SNAKE_CASE ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap __lowerCAmelCase: int = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] ) return rect def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __lowerCAmelCase: List[Any] = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) ) return result def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ) -> str: """simple docstring""" __lowerCAmelCase: Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) __lowerCAmelCase: List[Any] = tile.crop(SCREAMING_SNAKE_CASE ) return tile def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]: """simple docstring""" __lowerCAmelCase: List[str] = n % d return n - divisor class A_ ( snake_case__ ): def __init__( self : Optional[Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : 
DDPMScheduler , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : int = 3_5_0 , ) -> Optional[Any]: super().__init__( vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , ) def UpperCAmelCase ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[int]: torch.manual_seed(0 ) __lowerCAmelCase: Optional[int] = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) __lowerCAmelCase: Optional[Any] = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size ) __lowerCAmelCase: Any = image.crop(UpperCAmelCase ) __lowerCAmelCase: Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] __lowerCAmelCase: Tuple = translated_slice_x - (original_image_slice / 2) __lowerCAmelCase: Union[str, Any] = max(0 , UpperCAmelCase ) __lowerCAmelCase: Optional[int] = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: Union[str, Any] = to_input.size __lowerCAmelCase: List[Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) __lowerCAmelCase: int = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0] __lowerCAmelCase: Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) __lowerCAmelCase: Union[str, Any] = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) __lowerCAmelCase: Optional[int] = [] if x == 0: remove_borders.append('l' ) elif crop_rect[2] == image.size[0]: remove_borders.append('r' ) if y == 0: remove_borders.append('t' ) elif crop_rect[3] == image.size[1]: remove_borders.append('b' ) __lowerCAmelCase: int = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , ) final_image.paste( UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase ) @torch.no_grad() def __call__( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase : int = 7_5 , UpperCAmelCase : float = 9.0 , UpperCAmelCase : int = 5_0 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1_2_8 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : int = 3_2 , ) -> str: __lowerCAmelCase: List[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) ) __lowerCAmelCase: str = math.ceil(image.size[0] / tile_size ) __lowerCAmelCase: List[Any] = math.ceil(image.size[1] / tile_size ) __lowerCAmelCase: Optional[Any] = tcx * tcy __lowerCAmelCase: Tuple = 0 for y in 
range(UpperCAmelCase ): for x in range(UpperCAmelCase ): self._process_tile( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , ) current_count += 1 if callback is not None: callback({'progress': current_count / total_tile_count, 'image': final_image} ) return final_image def _a ( ) -> int: """simple docstring""" __lowerCAmelCase: Any = 'stabilityai/stable-diffusion-x4-upscaler' __lowerCAmelCase: Dict = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa ) __lowerCAmelCase: Optional[Any] = pipe.to('cuda' ) __lowerCAmelCase: Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' ) def callback(SCREAMING_SNAKE_CASE : Tuple ): print(f'''progress: {obj['progress']:.4f}''' ) obj["image"].save('diffusers_library_progress.jpg' ) __lowerCAmelCase: str = pipe(image=SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE ) final_image.save('diffusers_library.jpg' ) if __name__ == "__main__": main()
style_context_codestyle: 322
label: 0
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version

NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize

_CITATION = """\
@inproceedings{banarjee2005,
  title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month = jun,
  year = {2005},
  address = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url = {https://www.aclweb.org/anthology/W05-0909},
  pages = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
code_codestyle: 208
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of a two-way partition of `arr`."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    # A sum of 0 is always achievable (empty subset).
    for i in range(1, n + 1):
        dp[i][0] = True

    # No positive sum is achievable from zero elements.
    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # achievable without element i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    # The best partition puts the largest achievable sum <= s/2 on one side.
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
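A quick sanity check of the routine above: partitioning [1, 6, 11, 5] as {1, 5, 6} and {11} gives sums 12 and 11, so the minimum difference is 1.

```python
print(find_min([1, 6, 11, 5]))  # 1
```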
style_context_codestyle: 322
label: 0
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) from x0 to x_end with step size h using classic RK4."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
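A quick accuracy check for the integrator above: for y' = y with y(0) = 1, the exact value at x = 1 is e, and RK4 with h = 0.01 reproduces it to roughly 1e-10:

```python
import numpy as np

ys = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
print(ys[-1], np.e)  # the two agree to about 1e-10
```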
code_codestyle: 303
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of two entries of the sorted list `nums` that sum to `target`."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
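Worth noting: the scan above relies on `nums` being sorted in ascending order — that is what justifies moving `i` up when the sum is too small and `j` down when it is too large; on unsorted input it can walk past a valid pair. When no pair matches, it returns an empty list:

```python
print(two_pointer([2, 7, 11, 15], 26))   # [2, 3] since 11 + 15 == 26
print(two_pointer([2, 7, 11, 15], 100))  # [] -- no pair reaches 100
```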
style_context_codestyle: 322
label: 0
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( snake_case__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = GPTSanJapaneseTokenizer __UpperCamelCase = False __UpperCamelCase = {'do_clean_text': False, 'add_prefix_space': False} def UpperCAmelCase__ ( self :Dict ) -> Optional[int]: super().setUp() # fmt: off UpperCAmelCase = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on UpperCAmelCase = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 UpperCAmelCase = {'unk_token': '<unk>'} UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(lowercase_ ) ) def UpperCAmelCase__ ( self :int , **lowercase_ :Optional[Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCAmelCase__ ( self :str , lowercase_ :List[Any] ) -> int: UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、㔺界。😀' UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] ) -> List[str]: UpperCAmelCase = self.get_input_output_texts(lowercase_ ) UpperCAmelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) return text, ids def UpperCAmelCase__ ( self :List[str] ) -> str: pass # TODO add if relevant def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]: pass # TODO add if relevant def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]: pass # TODO add if relevant def UpperCAmelCase__ ( self :Dict ) -> str: UpperCAmelCase = self.get_tokenizer() # Testing tokenization UpperCAmelCase = 'こんにちは、世界。 こんばんは、㔺界。' UpperCAmelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] UpperCAmelCase = tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) # Testing conversion to ids without special tokens UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) # Testing conversion to ids with special tokens UpperCAmelCase = tokens + [tokenizer.unk_token] UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :List[str] ) -> Optional[int]: UpperCAmelCase = self.get_tokenizer() # Testing tokenization UpperCAmelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' UpperCAmelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。' UpperCAmelCase = tokenizer.encode(lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) @slow def 
UpperCAmelCase__ ( self :int ) -> Optional[int]: UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization UpperCAmelCase = 'こんにちは、世界。' UpperCAmelCase = 'こんばんは、㔺界。😀' UpperCAmelCase = 'こんにちは、世界。こんばんは、世界。😀' UpperCAmelCase = tokenizer.encode(prefix_text + input_text ) UpperCAmelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text ) UpperCAmelCase = tokenizer.encode(lowercase_ , prefix_text=lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) @slow def UpperCAmelCase__ ( self :Union[str, Any] ) -> int: UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization UpperCAmelCase = 'こんにちは、世界。' UpperCAmelCase = 'こんばんは、㔺界。😀' UpperCAmelCase = len(tokenizer.encode(lowercase_ ) ) - 2 UpperCAmelCase = len(tokenizer.encode(lowercase_ ) ) - 2 UpperCAmelCase = [1] + [0] * (len_prefix + len_text + 1) UpperCAmelCase = [1] * (len_prefix + len_text + 1) + [0] UpperCAmelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1) UpperCAmelCase = tokenizer(prefix_text + input_text ).token_type_ids UpperCAmelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids UpperCAmelCase = tokenizer(lowercase_ , prefix_text=lowercase_ ).token_type_ids self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) @slow def UpperCAmelCase__ ( self :List[Any] ) -> int: UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) UpperCAmelCase = tokenizer.encode('あンいワ' ) UpperCAmelCase = tokenizer.encode('' , prefix_text='あンいワ' ) UpperCAmelCase = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) ) self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) ) self.assertNotEqual(lowercase_ , lowercase_ ) self.assertNotEqual(lowercase_ , lowercase_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]: UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) UpperCAmelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] UpperCAmelCase = tokenizer(lowercase_ , padding=lowercase_ ) UpperCAmelCase = tokenizer.batch_encode_plus(lowercase_ , padding=lowercase_ ) # fmt: off UpperCAmelCase = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] UpperCAmelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] UpperCAmelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , lowercase_ ) self.assertListEqual(x_token.token_type_ids , lowercase_ ) self.assertListEqual(x_token.attention_mask , lowercase_ ) self.assertListEqual(x_token_a.input_ids , lowercase_ ) self.assertListEqual(x_token_a.token_type_ids , lowercase_ ) self.assertListEqual(x_token_a.attention_mask , lowercase_ ) def UpperCAmelCase__ ( self :List[str] ) -> Union[str, Any]: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def UpperCAmelCase__ ( self :Dict ) -> str: # tokenizer has no padding token pass
code_codestyle: 78
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _a = '''scheduler_config.json''' class A_ ( snake_case__ ): _lowercase : Optional[Any] = 1 _lowercase : Tuple = 2 _lowercase : Dict = 3 _lowercase : int = 4 _lowercase : Optional[Any] = 5 @dataclass class A_ ( snake_case__ ): _lowercase : jnp.ndarray class A_ : _lowercase : Optional[int] = SCHEDULER_CONFIG_NAME _lowercase : Dict = ['dtype'] _lowercase : int = [] _lowercase : Union[str, Any] = True @classmethod def UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ) -> Tuple: __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.load_config( pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , ) __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase ) if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ): __lowerCAmelCase: Dict = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Any ) -> List[str]: self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self : str ) -> Dict: return self._get_compatibles() @classmethod def UpperCAmelCase ( cls : Optional[int] ) -> Any: __lowerCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __lowerCAmelCase: Dict = importlib.import_module(__name__.split('.' 
)[0] ) __lowerCAmelCase: Dict = [ getattr(UpperCAmelCase , UpperCAmelCase ) for c in compatible_classes_str if hasattr(UpperCAmelCase , UpperCAmelCase ) ] return compatible_classes def _a ( SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Tuple[int] ) -> jnp.ndarray: """simple docstring""" assert len(SCREAMING_SNAKE_CASE ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(SCREAMING_SNAKE_CASE ) - x.ndim) ) , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=0.9_9_9 , SCREAMING_SNAKE_CASE : List[Any]=jnp.floataa ) -> jnp.ndarray: """simple docstring""" def alpha_bar(SCREAMING_SNAKE_CASE : str ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 __lowerCAmelCase: str = [] for i in range(SCREAMING_SNAKE_CASE ): __lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps __lowerCAmelCase: List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(SCREAMING_SNAKE_CASE ) / alpha_bar(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) return jnp.array(SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ) @flax.struct.dataclass class A_ : _lowercase : jnp.ndarray _lowercase : jnp.ndarray _lowercase : jnp.ndarray @classmethod def UpperCAmelCase ( cls : str , UpperCAmelCase : Optional[int] ) -> Any: __lowerCAmelCase: str = scheduler.config if config.trained_betas is not None: __lowerCAmelCase: Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": __lowerCAmelCase: Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowerCAmelCase: List[Any] = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowerCAmelCase: str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) __lowerCAmelCase: Optional[Any] = 1.0 - betas __lowerCAmelCase: Optional[Any] = jnp.cumprod(UpperCAmelCase , axis=0 ) return cls( alphas=UpperCAmelCase , betas=UpperCAmelCase , alphas_cumprod=UpperCAmelCase , ) def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> int: """simple docstring""" __lowerCAmelCase: Optional[int] = state.alphas_cumprod __lowerCAmelCase: str = alphas_cumprod[timesteps] ** 0.5 __lowerCAmelCase: Any = sqrt_alpha_prod.flatten() __lowerCAmelCase: Any = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape ) __lowerCAmelCase: Any = (1 - alphas_cumprod[timesteps]) ** 0.5 __lowerCAmelCase: str = sqrt_one_minus_alpha_prod.flatten() __lowerCAmelCase: str = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> str: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: 
Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> Any: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase: Tuple = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: int = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
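For reference, the `add_noise` and `get_velocity` helpers in the sample above implement the standard forward-diffusion reparameterization; writing \(\bar{\alpha}_t\) for the cumulative product of the alphas at timestep \(t\):

$$
x_t = \sqrt{\bar{\alpha}_t}\,x_0 + \sqrt{1-\bar{\alpha}_t}\,\epsilon,
\qquad
v_t = \sqrt{\bar{\alpha}_t}\,\epsilon - \sqrt{1-\bar{\alpha}_t}\,x_0 .
$$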
style_context_codestyle: 322
label: 0
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : int=3 , UpperCamelCase__ : int=4 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=99 , UpperCamelCase__ : Tuple=36 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Optional[Any]=6 , UpperCamelCase__ : int=6 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=1000 , ) -> int: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = num_channels __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = coordinate_size __magic_name__ = shape_size __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope __magic_name__ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __magic_name__ = text_seq_length __magic_name__ = (image_size // patch_size) ** 2 + 1 __magic_name__ = self.text_seq_length + self.image_seq_length def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __magic_name__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __magic_name__ = bbox.numpy() # Ensure that bbox is legal for i in 
range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __magic_name__ = bbox[i, j, 3] __magic_name__ = bbox[i, j, 1] __magic_name__ = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __magic_name__ = bbox[i, j, 2] __magic_name__ = bbox[i, j, 0] __magic_name__ = tmp_coordinate __magic_name__ = tf.constant(UpperCamelCase__ ) __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.text_seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __magic_name__ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _lowercase ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ) -> int: """simple docstring""" __magic_name__ = TFLayoutLMvaModel(config=UpperCamelCase__ ) # text + image __magic_name__ = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ ) __magic_name__ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , training=UpperCamelCase__ , ) __magic_name__ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __magic_name__ = model({"""pixel_values""": pixel_values} , training=UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ) -> int: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase__ ) __magic_name__ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , 
labels=UpperCamelCase__ , training=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> Any: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = TFLayoutLMvaForTokenClassification(config=UpperCamelCase__ ) __magic_name__ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _lowercase ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" __magic_name__ = 2 __magic_name__ = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase__ ) __magic_name__ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , training=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() (__magic_name__) = config_and_inputs __magic_name__ = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ): '''simple docstring''' a__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) a__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) a__ = False a__ = False a__ = False def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ) -> List[str]: """simple docstring""" return True def _lowercase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=False ) -> dict: """simple docstring""" __magic_name__ = copy.deepcopy(UpperCamelCase__ ) if model_class in get_values(UpperCamelCase__ ): __magic_name__ = { k: tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(UpperCamelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCamelCase__ ): __magic_name__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCamelCase__ ): __magic_name__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __magic_name__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in 
get_values(UpperCamelCase__ ): __magic_name__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCamelCase__ ): __magic_name__ = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def _lowercase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __magic_name__ = TFLayoutLMvaModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : List[Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) if getattr(UpperCamelCase__ , """hf_compute_loss""" , UpperCamelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label __magic_name__ = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase__ )[0] ] __magic_name__ = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __magic_name__ = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = prepared_for_class.pop("""input_ids""" ) __magic_name__ = model(UpperCamelCase__ , **UpperCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __magic_name__ = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __magic_name__ = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __magic_name__ = -100 __magic_name__ = tf.convert_to_tensor(UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , **UpperCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __magic_name__ = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __magic_name__ = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) # Get keys that were added with the _prepare_for_class function __magic_name__ = prepared_for_class.keys() - inputs_dict.keys() __magic_name__ = inspect.signature(model.call ).parameters __magic_name__ = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __magic_name__ = {0: 'input_ids'} for label_key in label_keys: __magic_name__ = signature_names.index(UpperCamelCase__ ) __magic_name__ = label_key __magic_name__ = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __magic_name__ = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, 
value in sorted_tuple_index_mapping: __magic_name__ = prepared_for_class[value] __magic_name__ = tuple(UpperCamelCase__ ) # Send to model __magic_name__ = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" ( __magic_name__ ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Dict ) -> int: """simple docstring""" ( __magic_name__ ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : str ) -> List[str]: """simple docstring""" ( __magic_name__ ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : int ) -> List[str]: """simple docstring""" ( __magic_name__ ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" ( __magic_name__ ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @slow def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = TFLayoutLMvaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def a__ ( ): '''simple docstring''' __magic_name__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self : int ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None @slow def _lowercase ( self : Any ) -> List[str]: """simple docstring""" __magic_name__ = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __magic_name__ = self.default_image_processor __magic_name__ = prepare_img() __magic_name__ = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" ).pixel_values __magic_name__ = tf.constant([[1, 2]] ) __magic_name__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __magic_name__ = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ ) # verify the logits __magic_name__ = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ ) __magic_name__ = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
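One detail of the tester above worth making explicit: LayoutLMv3's sequence length is the number of text tokens plus one patch embedding per (patch_size x patch_size) tile plus a CLS token. With the tester's defaults (image_size=4, patch_size=2, text_seq_length=7):

image_size, patch_size, text_seq_length = 4, 2, 7
image_seq_length = (image_size // patch_size) ** 2 + 1   # 2*2 patches + CLS = 5
seq_length = text_seq_length + image_seq_length          # 12
print(seq_length)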
88
_a = {
    '''A''': ['''B''', '''C''', '''E'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F''', '''G'''],
    '''D''': ['''B'''],
    '''E''': ['''A''', '''B''', '''D'''],
    '''F''': ['''C'''],
    '''G''': ['''C'''],
}


def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
    """simple docstring"""
    __lowerCAmelCase: int = set()
    # keep track of all the paths to be checked
    __lowerCAmelCase: str = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        __lowerCAmelCase: str = queue.pop(0 )
        # get the last node from the path
        __lowerCAmelCase: Union[str, Any] = path[-1]
        if node not in explored:
            __lowerCAmelCase: Dict = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                __lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
                new_path.append(SCREAMING_SNAKE_CASE )
                queue.append(SCREAMING_SNAKE_CASE )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(SCREAMING_SNAKE_CASE )
    # in case there's no path between the 2 nodes
    return []


def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
    """simple docstring"""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    __lowerCAmelCase: Optional[int] = [start]
    __lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
    # Keep tab on distances from `start` node.
    __lowerCAmelCase: Optional[int] = {start: 0, target: -1}
    while queue:
        __lowerCAmelCase: Any = queue.pop(0 )
        if node == target:
            __lowerCAmelCase: Optional[int] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(SCREAMING_SNAKE_CASE )
                queue.append(SCREAMING_SNAKE_CASE )
                __lowerCAmelCase: Union[str, Any] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, '''G''', '''D'''))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, '''G''', '''D'''))  # returns 4
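Because BFS explores an unweighted graph level by level, the path returned by the first routine and the distance returned by the second must agree: the path has distance + 1 nodes. A self-contained sketch with readable names (the obfuscated identifiers above make the module itself non-importable):

from collections import deque

graph = {"A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
         "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"]}

def bfs_distance(graph, start, goal):
    seen, queue = {start}, deque([(start, 0)])
    while queue:
        node, d = queue.popleft()
        if node == goal:
            return d
        for nxt in graph[node]:
            if nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, d + 1))
    return -1

print(bfs_distance(graph, "G", "D"))  # 4, matching the path G -> C -> A -> B -> D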
322
0
'''simple docstring'''
import baseaa


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> bytes:
    return baseaa.baaencode(string.encode("""utf-8""" ) )


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : bytes ) -> str:
    return baseaa.baadecode(_UpperCAmelCase ).decode("""utf-8""" )


if __name__ == "__main__":
    A__: int = '''Hello World!'''
    A__: str = baseaa_encode(test)
    print(encoded)
    A__: Optional[Any] = baseaa_decode(encoded)
    print(decoded)
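For reference, Base85 packs 4 input bytes into 5 output characters (about 25% overhead), versus Base64's 3-into-4 (about 33%). A quick standard-library comparison of the round trip the module above performs:

import base64

msg = "Hello World!".encode("utf-8")
b85 = base64.b85encode(msg)   # 15 bytes of output for 12 bytes of input
b64 = base64.b64encode(msg)   # 16 bytes
print(b85, len(b85))
print(b64, len(b64))
assert base64.b85decode(b85) == msg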
276
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( snake_case__ ): _lowercase : int = ['image_processor', 'tokenizer'] _lowercase : Union[str, Any] = 'LayoutLMv3ImageProcessor' _lowercase : List[str] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self : Any , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Optional[Any] ) -> str: __lowerCAmelCase: str = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCAmelCase , ) __lowerCAmelCase: List[Any] = kwargs.pop('feature_extractor' ) __lowerCAmelCase: Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' 
) # first, apply the image processor __lowerCAmelCase: str = self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCAmelCase , UpperCAmelCase ): __lowerCAmelCase: Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension) __lowerCAmelCase: List[str] = features['words'] __lowerCAmelCase: List[Any] = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) # add pixel values __lowerCAmelCase: Tuple = features.pop('pixel_values' ) if return_overflowing_tokens is True: __lowerCAmelCase: int = self.get_overflowing_images(UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] ) __lowerCAmelCase: str = images return encoded_inputs def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image __lowerCAmelCase: str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCAmelCase ) != len(UpperCAmelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}''' ) return images_with_overflow def UpperCAmelCase ( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : Any , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> List[str]: return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self : Union[str, Any] ) -> str: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCAmelCase ( self : str ) -> Union[str, Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase , ) return self.image_processor
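A hypothetical end-to-end call of this processor, assuming the microsoft/layoutlmv3-base checkpoint and an OCR backend (pytesseract) installed; the image path is made up:

from transformers import LayoutLMv3Processor
from PIL import Image

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("invoice.png").convert("RGB")  # hypothetical file
encoding = processor(image, return_tensors="pt")  # runs OCR, then tokenizes words + boxes
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values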
322
0
'''simple docstring''' import cva import numpy as np class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , __lowercase : float , __lowercase : int ) -> Optional[Any]: if k in (0.04, 0.06): SCREAMING_SNAKE_CASE__ : Optional[Any] =k SCREAMING_SNAKE_CASE__ : List[str] =window_size else: raise ValueError('''invalid k value''' ) def __str__( self : Union[str, Any] ) -> str: return str(self.k ) def __magic_name__ ( self : Dict , __lowercase : str ) -> tuple[cva.Mat, list[list[int]]]: SCREAMING_SNAKE_CASE__ : Optional[int] =cva.imread(__lowercase , 0 ) SCREAMING_SNAKE_CASE__ : Tuple =img.shape SCREAMING_SNAKE_CASE__ : list[list[int]] =[] SCREAMING_SNAKE_CASE__ : Optional[Any] =img.copy() SCREAMING_SNAKE_CASE__ : Tuple =cva.cvtColor(__lowercase , cva.COLOR_GRAY2RGB ) SCREAMING_SNAKE_CASE__ : Optional[Any] =np.gradient(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =dx**2 SCREAMING_SNAKE_CASE__ : Union[str, Any] =dy**2 SCREAMING_SNAKE_CASE__ : Dict =dx * dy SCREAMING_SNAKE_CASE__ : Dict =0.04 SCREAMING_SNAKE_CASE__ : Optional[int] =self.window_size // 2 for y in range(__lowercase , h - offset ): for x in range(__lowercase , w - offset ): SCREAMING_SNAKE_CASE__ : str =ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE__ : List[Any] =iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE__ : str =ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE__ : Optional[int] =(wxx * wyy) - (wxy**2) SCREAMING_SNAKE_CASE__ : Any =wxx + wyy SCREAMING_SNAKE_CASE__ : Dict =det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_55 ) return color_img, corner_list if __name__ == "__main__": a_ = HarrisCorner(0.04, 3) a_ , a_ = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
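The detector above scores each pixel with the Harris response R = det(M) - k * trace(M)^2, where M is the structure tensor summed over a window (note the 0.04 literal inside detect, which appears to shadow the constructor's k). A self-contained NumPy sketch of the same response map, with illustrative names:

import numpy as np

def harris_response(gray: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    # structure tensor entries from image gradients
    dy, dx = np.gradient(gray.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    pad = window // 2
    h, w = gray.shape
    r = np.zeros_like(gray, dtype=float)
    for y in range(pad, h - pad):
        for x in range(pad, w - pad):
            wxx = ixx[y - pad : y + pad + 1, x - pad : x + pad + 1].sum()
            wyy = iyy[y - pad : y + pad + 1, x - pad : x + pad + 1].sum()
            wxy = ixy[y - pad : y + pad + 1, x - pad : x + pad + 1].sum()
            det = wxx * wyy - wxy ** 2
            trace = wxx + wyy
            r[y, x] = det - k * trace ** 2  # large positive R indicates a corner
    return r

# a white square on black background should respond most strongly near its corners
img = np.zeros((12, 12)); img[4:8, 4:8] = 255.0
print(np.unravel_index(np.argmax(harris_response(img)), img.shape))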
152
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL _a = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : tuple , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=False , ) -> str: """simple docstring""" output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , ) else: export( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , ) @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: List[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __lowerCAmelCase: str = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: __lowerCAmelCase: Dict = 'cpu' __lowerCAmelCase: Optional[int] = Path(SCREAMING_SNAKE_CASE ) # VAE DECODER __lowerCAmelCase: Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' ) __lowerCAmelCase: Union[str, Any] = vae_decoder.config.latent_channels # forward only through the decoder part __lowerCAmelCase: Any = vae_decoder.decode onnx_export( SCREAMING_SNAKE_CASE , model_args=( torch.randn(1 , SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=SCREAMING_SNAKE_CASE , ) del vae_decoder if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=1_4, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') _a = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('''SD: Done: ONNX''')
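To sanity-check the exported decoder, one can load it with ONNX Runtime and feed a random latent. The sketch below assumes the default Stable Diffusion VAE (4 latent channels, 8x spatial upsampling) and a made-up output path:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")  # hypothetical --output_path
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = sess.run(None, {"latent_sample": latent})
print(sample.shape)  # expected (1, 3, 200, 200) for an 8x-upsampling decoder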
322
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : Optional[int] = TextaTextGenerationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) return generator, ["Something to write", "Something else"] def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> int: lowercase__ : Tuple = generator('''Something there''' ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) ) lowercase__ : Any = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], ] , ) lowercase__ : Dict = generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], [{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}], ] , ) with self.assertRaises(__lowerCAmelCase ): generator(4 ) @require_torch def _lowerCAmelCase( self ) -> Any: lowercase__ : str = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' ) # do_sample=False necessary for reproducibility lowercase__ : Any = generator('''Something there''' , do_sample=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ''''''}] ) lowercase__ : Any = 3 lowercase__ : List[str] = generator( '''Something there''' , num_return_sequences=__lowerCAmelCase , num_beams=__lowerCAmelCase , ) lowercase__ : Optional[Any] = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ : str = generator('''This is a test''' , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase ) self.assertEqual( __lowerCAmelCase , [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ] , ) lowercase__ : int = generator.model.config.eos_token_id lowercase__ : str = '<pad>' lowercase__ : Optional[Any] = generator( ['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , ) self.assertEqual( __lowerCAmelCase , [ [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': 
ANY(torch.Tensor )}, ], [ {'''generated_token_ids''': ANY(torch.Tensor )}, {'''generated_token_ids''': ANY(torch.Tensor )}, ], ] , ) @require_tf def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Union[str, Any] = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' ) # do_sample=False necessary for reproducibility lowercase__ : Optional[Any] = generator('''Something there''' , do_sample=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ''''''}] )
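Outside the test harness, the pipeline under test is used like this (model name illustrative; any seq2seq checkpoint works):

from transformers import pipeline

generator = pipeline("text2text-generation", model="t5-small")
print(generator("translate English to German: How old are you?"))
# e.g. [{'generated_text': 'Wie alt sind Sie?'}]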
198
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int: """simple docstring""" def update_area_of_max_square(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __lowerCAmelCase: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE , col + 1 ) __lowerCAmelCase: Tuple = update_area_of_max_square(row + 1 , col + 1 ) __lowerCAmelCase: int = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE ) if mat[row][col]: __lowerCAmelCase: List[str] = 1 + min([right, diagonal, down] ) __lowerCAmelCase: List[str] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE ) return sub_problem_sol else: return 0 __lowerCAmelCase: List[str] = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int: """simple docstring""" def update_area_of_max_square_using_dp_array( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __lowerCAmelCase: List[Any] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE , col + 1 , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Any = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if mat[row][col]: __lowerCAmelCase: int = 1 + min([right, diagonal, down] ) __lowerCAmelCase: Union[str, Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[int] = sub_problem_sol return sub_problem_sol else: return 0 __lowerCAmelCase: int = [0] __lowerCAmelCase: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE )] update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE ) return largest_square_area[0] def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int: """simple docstring""" __lowerCAmelCase: int = [[0] * (cols + 1) for _ in range(rows + 1 )] __lowerCAmelCase: Optional[Any] = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __lowerCAmelCase: Union[str, Any] = dp_array[row][col + 1] __lowerCAmelCase: str = dp_array[row + 1][col + 1] __lowerCAmelCase: Optional[int] = dp_array[row + 1][col] if mat[row][col] == 1: __lowerCAmelCase: Optional[Any] = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: str = max(dp_array[row][col] , SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase: Dict = 0 return largest_square_area def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int: """simple docstring""" __lowerCAmelCase: Tuple = [0] * (cols + 1) __lowerCAmelCase: Optional[int] = [0] * (cols + 1) __lowerCAmelCase: str = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __lowerCAmelCase: int = current_row[col + 1] __lowerCAmelCase: Union[str, Any] = next_row[col + 1] __lowerCAmelCase: Any = next_row[col] if mat[row][col] == 1: __lowerCAmelCase: str = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowerCAmelCase: str = max(current_row[col] , SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase: Optional[Any] = 0 __lowerCAmelCase: int = current_row 
return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
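All four variants above implement the same recurrence: if mat[r][c] == 1, the largest all-ones square anchored at (r, c) has side 1 + min(right, diagonal, down). A worked example of the bottom-up version on a 3x3 matrix:

mat = [[1, 1, 0],
       [1, 1, 1],
       [0, 1, 1]]
rows, cols = 3, 3
dp = [[0] * (cols + 1) for _ in range(rows + 1)]
best = 0
for r in range(rows - 1, -1, -1):
    for c in range(cols - 1, -1, -1):
        if mat[r][c]:
            dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
            best = max(best, dp[r][c])
print(best)  # 2: the largest all-ones square is the 2x2 block in the top-left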
322
0
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class _SCREAMING_SNAKE_CASE ( snake_case__ ):
    def __lt__( self , lowercase ) -> Union[str, Any]:
        return self[-1] < other[-1]

    def __eq__( self , lowercase ) -> Union[str, Any]:
        return self[-1] == other[-1]


def lowerCamelCase_ ( lowerCamelCase__ ):
    lowerCamelCase_ = []
    # sort into stacks
    for element in collection:
        lowerCamelCase_ = Stack([element] )
        lowerCamelCase_ = bisect_left(lowerCamelCase__ , lowerCamelCase__ )
        if i != len(lowerCamelCase__ ):
            stacks[i].append(lowerCamelCase__ )
        else:
            stacks.append(lowerCamelCase__ )
    # use a heap-based merge to merge stack efficiently
    lowerCamelCase_ = merge(*(reversed(lowerCamelCase__ ) for stack in stacks) )
    return collection


if __name__ == "__main__":
    __A = input('''Enter numbers separated by a comma:\n''').strip()
    __A = [int(item) for item in user_input.split(''',''')]
    print(patience_sort(unsorted))
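A self-contained restatement with readable names, since the obfuscated module above is not runnable as printed. Each pile stays non-increasing, so reversing the piles yields sorted runs that a k-way heap merge combines in O(n log n); the pile count equals the length of a longest strictly increasing subsequence, which is why patience sorting also underlies fast LIS algorithms:

from bisect import bisect_left
from heapq import merge

def patience_sort(seq):
    piles: list[list[int]] = []
    for x in seq:
        i = bisect_left([pile[-1] for pile in piles], x)  # first pile whose top >= x
        if i < len(piles):
            piles[i].append(x)   # pile stays non-increasing
        else:
            piles.append([x])    # no pile can take x: start a new one
    return list(merge(*(reversed(pile) for pile in piles)))

print(patience_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]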
19
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _a = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) _a = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: Optional[int] = SavedModel() __lowerCAmelCase: str = [] with open(os.path.join(SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f: __lowerCAmelCase: List[str] = json.load(SCREAMING_SNAKE_CASE )['opsets'] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] ) with open(SCREAMING_SNAKE_CASE , 'rb' ) as f: saved_model.ParseFromString(f.read() ) __lowerCAmelCase: Optional[int] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want __lowerCAmelCase: List[str] = sorted(SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(SCREAMING_SNAKE_CASE ) if strict and len(SCREAMING_SNAKE_CASE ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(SCREAMING_SNAKE_CASE ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*SCREAMING_SNAKE_CASE , sep='\n' ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) _a = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
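The core of the script is a two-level walk over the SavedModel protobuf: ops in each metagraph's GraphDef, plus ops inside every function in its library (tf.function bodies). A minimal inventory sketch; the model path is hypothetical, and note the real protobuf module is saved_model_pb2:

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

sm = SavedModel()
with open("my_model/saved_model.pb", "rb") as f:  # hypothetical path
    sm.ParseFromString(f.read())

ops = set()
for mg in sm.meta_graphs:
    ops.update(node.op for node in mg.graph_def.node)   # top-level graph
    for fn in mg.graph_def.library.function:            # tf.function bodies
        ops.update(node.op for node in fn.node_def)
print(sorted(ops))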
322
0
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) _lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__) _lowerCAmelCase : Any = {"facebook/bart-base": BartForConditionalGeneration} _lowerCAmelCase : List[str] = {"facebook/bart-base": BartTokenizer} def lowerCAmelCase ( ): """simple docstring""" UpperCAmelCase__ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=_lowerCAmelCase , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=_lowerCAmelCase , default=_lowerCAmelCase , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=_lowerCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCAmelCase , ) parser.add_argument( "--config_name" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=_lowerCAmelCase , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Where to store the final ONNX file." ) UpperCAmelCase__ = parser.parse_args() return args def lowerCAmelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any]="cpu" ): """simple docstring""" UpperCAmelCase__ = model_dict[model_name].from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase ) UpperCAmelCase__ = tokenizer_dict[model_name].from_pretrained(_lowerCAmelCase ) if model_name in ["facebook/bart-base"]: UpperCAmelCase__ = 0 UpperCAmelCase__ = None UpperCAmelCase__ = 0 return huggingface_model, tokenizer def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): """simple docstring""" model.eval() UpperCAmelCase__ = None UpperCAmelCase__ = torch.jit.script(BARTBeamSearchGenerator(_lowerCAmelCase ) ) with torch.no_grad(): UpperCAmelCase__ = 'My friends are cool but they eat too many carbs.' 
UpperCAmelCase__ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device ) UpperCAmelCase__ = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=_lowerCAmelCase , max_length=_lowerCAmelCase , early_stopping=_lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( _lowerCAmelCase , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , _lowerCAmelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=_lowerCAmelCase , ) logger.info("Model exported to {}".format(_lowerCAmelCase ) ) UpperCAmelCase__ = remove_dup_initializers(os.path.abspath(_lowerCAmelCase ) ) logger.info("Deduplicated and optimized model written to {}".format(_lowerCAmelCase ) ) UpperCAmelCase__ = onnxruntime.InferenceSession(_lowerCAmelCase ) UpperCAmelCase__ = ort_sess.run( _lowerCAmelCase , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(_lowerCAmelCase ), "max_length": np.array(_lowerCAmelCase ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def lowerCAmelCase ( ): """simple docstring""" UpperCAmelCase__ = parse_args() UpperCAmelCase__ = 5 UpperCAmelCase__ = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() UpperCAmelCase__ = torch.device(args.device ) UpperCAmelCase__ = load_model_tokenizer(args.model_name_or_path , _lowerCAmelCase ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(_lowerCAmelCase ) if args.max_length: UpperCAmelCase__ = args.max_length if args.num_beams: UpperCAmelCase__ = args.num_beams if args.output_file_path: UpperCAmelCase__ = args.output_file_path else: UpperCAmelCase__ = 'BART.onnx' logger.info("Exporting model to ONNX" ) export_and_validate_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
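The same export-then-validate loop on a toy module, stripped of the BART specifics, to show the moving parts (module and file name are arbitrary, not from the script above):

import numpy as np
import onnxruntime
import torch

class Toy(torch.nn.Module):
    def forward(self, x):
        return x * 2 + 1

x = torch.randn(1, 4)
torch.onnx.export(Toy(), (x,), "toy.onnx", input_names=["x"], output_names=["y"],
                  dynamic_axes={"x": {0: "batch"}})
sess = onnxruntime.InferenceSession("toy.onnx")
(y,) = sess.run(None, {"x": x.numpy()})
# compare framework outputs within a tolerance, as the script does for BART
np.testing.assert_allclose(y, (x * 2 + 1).numpy(), rtol=1e-3, atol=1e-3)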
169
import math

import qiskit


def _a ( SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if (
        isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_a < 0) or (input_a < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
        or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
        or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    __lowerCAmelCase: Union[str, Any] = qiskit.QuantumRegister(4 , 'qr' )
    __lowerCAmelCase: List[Any] = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    __lowerCAmelCase: Any = [input_a, input_a, carry_in]
    __lowerCAmelCase: List[str] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(SCREAMING_SNAKE_CASE )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(SCREAMING_SNAKE_CASE )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(SCREAMING_SNAKE_CASE )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE )  # measure the last two qbits
    __lowerCAmelCase: List[str] = qiskit.Aer.get_backend('aer_simulator' )
    __lowerCAmelCase: List[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_00 )
    return job.result().get_counts(SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
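The gate sequence is the textbook quantum full adder: the Toffolis write the carry into qubit 3 and the CNOTs leave the sum on qubit 2. Its classical truth table, for reference:

# sum = a XOR b XOR cin, carry-out = majority(a, b, cin)
for a in (0, 1):
    for b in (0, 1):
        for cin in (0, 1):
            s = (a ^ b) ^ cin
            cout = (a & b) | (cin & (a ^ b))
            print(f"{a}{b}{cin} -> sum={s} carry={cout}")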
322
0
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
299
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ : def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int: __lowerCAmelCase: List[str] = parent __lowerCAmelCase: List[str] = batch_size __lowerCAmelCase: Optional[Any] = num_channels __lowerCAmelCase: Tuple = image_size __lowerCAmelCase: str = patch_size __lowerCAmelCase: List[str] = is_training __lowerCAmelCase: Union[str, Any] = use_input_mask __lowerCAmelCase: Union[str, Any] = use_token_type_ids __lowerCAmelCase: Tuple = use_labels __lowerCAmelCase: Optional[int] = vocab_size __lowerCAmelCase: Any = hidden_size __lowerCAmelCase: Tuple = num_hidden_layers __lowerCAmelCase: Optional[int] = num_attention_heads __lowerCAmelCase: Dict = intermediate_size __lowerCAmelCase: Union[str, Any] = hidden_act __lowerCAmelCase: str = hidden_dropout_prob __lowerCAmelCase: str = attention_probs_dropout_prob __lowerCAmelCase: str = max_position_embeddings __lowerCAmelCase: str = type_vocab_size __lowerCAmelCase: Optional[Any] = type_sequence_label_size __lowerCAmelCase: Union[str, Any] = initializer_range __lowerCAmelCase: List[str] = coordinate_size __lowerCAmelCase: Tuple = shape_size __lowerCAmelCase: List[Any] = num_labels __lowerCAmelCase: Any = num_choices __lowerCAmelCase: List[str] = scope __lowerCAmelCase: Dict = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowerCAmelCase: Optional[Any] = text_seq_length __lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1 __lowerCAmelCase: int = self.text_seq_length + self.image_seq_length def UpperCAmelCase ( self : Any ) -> Any: __lowerCAmelCase: 
Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowerCAmelCase: Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __lowerCAmelCase: str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCAmelCase: Optional[Any] = bbox[i, j, 3] __lowerCAmelCase: Tuple = bbox[i, j, 1] __lowerCAmelCase: Dict = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCAmelCase: Any = bbox[i, j, 2] __lowerCAmelCase: int = bbox[i, j, 0] __lowerCAmelCase: int = tmp_coordinate __lowerCAmelCase: List[Any] = tf.constant(UpperCAmelCase ) __lowerCAmelCase: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase: Union[str, Any] = None if self.use_input_mask: __lowerCAmelCase: List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __lowerCAmelCase: int = None if self.use_token_type_ids: __lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowerCAmelCase: str = None __lowerCAmelCase: Dict = None if self.use_labels: __lowerCAmelCase: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowerCAmelCase: Dict = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int: __lowerCAmelCase: Tuple = TFLayoutLMvaModel(config=UpperCAmelCase ) # text + image __lowerCAmelCase: Dict = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase ) __lowerCAmelCase: List[str] = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , ) __lowerCAmelCase: Optional[Any] = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __lowerCAmelCase: str = model(UpperCAmelCase , training=UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __lowerCAmelCase: List[str] = model({'pixel_values': pixel_values} , training=UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : 
List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> int: __lowerCAmelCase: List[str] = self.num_labels __lowerCAmelCase: Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase ) __lowerCAmelCase: int = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any: __lowerCAmelCase: Union[str, Any] = self.num_labels __lowerCAmelCase: List[str] = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase ) __lowerCAmelCase: Any = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Any: __lowerCAmelCase: str = 2 __lowerCAmelCase: Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase ) __lowerCAmelCase: int = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: __lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs() ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): List[str] = config_and_inputs __lowerCAmelCase: List[str] = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class A_ ( snake_case__ , snake_case__ , unittest.TestCase ): _lowercase : List[Any] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _lowercase : Tuple = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) _lowercase : Union[str, Any] = False _lowercase : Dict = False _lowercase : Tuple = False def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]: return True def UpperCAmelCase ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=False ) -> dict: __lowerCAmelCase: Optional[Any] = copy.deepcopy(UpperCAmelCase ) if model_class in get_values(UpperCAmelCase ): __lowerCAmelCase: int = { k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , 
(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase ): __lowerCAmelCase: Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCAmelCase ): __lowerCAmelCase: Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCAmelCase ): __lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCAmelCase ): __lowerCAmelCase: str = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: __lowerCAmelCase: Tuple = TFLayoutLMvaModelTester(self ) __lowerCAmelCase: str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 ) def UpperCAmelCase ( self : Tuple ) -> Dict: self.config_tester.run_common_tests() def UpperCAmelCase ( self : List[Any] ) -> Tuple: __lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase: List[Any] = model_class(UpperCAmelCase ) if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ): # The number of elements in the loss should be the same as the number of elements in the label __lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) __lowerCAmelCase: List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0] ] __lowerCAmelCase: Tuple = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) __lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' ) __lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) __lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __lowerCAmelCase: str = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __lowerCAmelCase: Tuple = -1_0_0 __lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase ) __lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , 
return_labels=UpperCAmelCase ) # Get keys that were added with the _prepare_for_class function __lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys() __lowerCAmelCase: Dict = inspect.signature(model.call ).parameters __lowerCAmelCase: Dict = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __lowerCAmelCase: str = {0: 'input_ids'} for label_key in label_keys: __lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase ) __lowerCAmelCase: Tuple = label_key __lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __lowerCAmelCase: List[Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __lowerCAmelCase: Optional[Any] = prepared_for_class[value] __lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase ) # Send to model __lowerCAmelCase: Any = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def UpperCAmelCase ( self : Dict ) -> Tuple: ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self : Dict ) -> int: ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase: Tuple = type self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self : str ) -> List[str]: ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self : int ) -> List[str]: ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> str: ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @slow def UpperCAmelCase ( self : 
Union[str, Any] ) -> Tuple: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase: Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def _a ( ) -> Any: """simple docstring""" __lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class A_ ( unittest.TestCase ): @cached_property def UpperCAmelCase ( self : int ) -> Dict: return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None @slow def UpperCAmelCase ( self : Any ) -> List[str]: __lowerCAmelCase: Any = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __lowerCAmelCase: Tuple = self.default_image_processor __lowerCAmelCase: str = prepare_img() __lowerCAmelCase: Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='tf' ).pixel_values __lowerCAmelCase: Dict = tf.constant([[1, 2]] ) __lowerCAmelCase: str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __lowerCAmelCase: List[str] = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase ) # verify the logits __lowerCAmelCase: Tuple = (1, 1_9_9, 7_6_8) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase ) __lowerCAmelCase: str = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
322
0
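As a standalone illustration (not part of any dataset row), the sketch below drives the TF LayoutLMv3 sequence-classification head exercised by the test code above with random inputs. Every size in it is an assumption chosen so the spatial embeddings line up (4 * coordinate_size + 2 * shape_size must equal hidden_size); none of the values are taken from the rows.

import tensorflow as tf
from transformers import LayoutLMv3Config, TFLayoutLMv3ForSequenceClassification

# tiny illustrative config (assumed values, not from the dataset row above)
config = LayoutLMv3Config(
    vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, coordinate_size=6, shape_size=6,
    input_size=32, patch_size=16, num_labels=2,
)
model = TFLayoutLMv3ForSequenceClassification(config)

batch_size, seq_len = 2, 4
input_ids = tf.ones((batch_size, seq_len), dtype=tf.int32)
# each bbox is (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1
bbox = tf.tile(tf.constant([[[1, 2, 3, 4]]], dtype=tf.int32), (batch_size, seq_len, 1))
pixel_values = tf.random.uniform((batch_size, config.num_channels, config.input_size, config.input_size))

logits = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values).logits
assert logits.shape == (batch_size, config.num_labels)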
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A =logging.get_logger(__name__) A ='▁' A ={'vocab_file': 'prophetnet.tokenizer'} A ={ 'vocab_file': { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer' ), } } A ={ 'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False}, } A ={ 'microsoft/xprophetnet-large-wiki100-cased': 5_12, } def snake_case_ (_a : List[Any] ): UpperCAmelCase = collections.OrderedDict() with open(_a , '''r''' , encoding='''utf-8''' ) as reader: UpperCAmelCase = reader.readlines() for index, token in enumerate(_a ): UpperCAmelCase = token.rstrip('''\n''' ) UpperCAmelCase = index return vocab class _a ( snake_case__ ): __a : Dict = VOCAB_FILES_NAMES __a : List[Any] = PRETRAINED_VOCAB_FILES_MAP __a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Union[str, Any] = ['input_ids', 'attention_mask'] def __init__( self : List[str] , lowercase : str , lowercase : str="[SEP]" , lowercase : Tuple="[SEP]" , lowercase : Optional[int]="[SEP]" , lowercase : int="[UNK]" , lowercase : Optional[int]="[PAD]" , lowercase : int="[CLS]" , lowercase : int="[MASK]" , lowercase : Optional[Dict[str, Any]] = None , **lowercase : str , ): '''simple docstring''' UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , unk_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase ) ) UpperCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab UpperCAmelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): UpperCAmelCase = f"[unused{i}]" UpperCAmelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab UpperCAmelCase = 12 UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(lowercase ) def __getstate__( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.__dict__.copy() UpperCAmelCase = None return state def __setstate__( self : int , lowercase : List[Any] ): '''simple docstring''' UpperCAmelCase = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase = {} UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) if token_ids_a is None: return ([0] * len(lowercase )) + [1] return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1] def A ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A ( self : Any ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def A ( self : Any ): '''simple docstring''' UpperCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A ( self : List[Any] , lowercase : str ): '''simple docstring''' return self.sp_model.encode(lowercase , out_type=lowercase ) def A ( self : int , lowercase : Union[str, Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase = self.sp_model.PieceToId(lowercase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A ( self : List[str] , lowercase : int ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A ( self : Optional[Any] , lowercase : int ): '''simple docstring''' UpperCAmelCase = ''.join(lowercase ).replace(lowercase , ''' ''' ).strip() return out_string def A ( self : Optional[int] , lowercase : str , lowercase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowercase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase = os.path.join( lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase ) elif not 
os.path.isfile(self.vocab_file ): with open(lowercase , '''wb''' ) as fi: UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (out_vocab_file,) def A ( self : str , lowercase : List[int] , lowercase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] UpperCAmelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
34
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
322
0
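The XProphetNet tokenizer in the row above maps special tokens to the low ids and shifts every SentencePiece id by a fixed fairseq offset, as its inline vocab-alignment comment describes. The self-contained sketch below mimics that mapping with a toy piece-to-id table (the table itself is an assumption for illustration); it reproduces the comment's claim that the first real token "," lands at embedding position 15.

FAIRSEQ_TOKENS_TO_IDS = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
FAIRSEQ_OFFSET = 12
UNK_ID = FAIRSEQ_TOKENS_TO_IDS["[UNK]"]


def token_to_id(token: str, spm_piece_to_id: dict) -> int:
    """Specials keep their low ids; every SentencePiece id is shifted by the offset."""
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_piece_to_id.get(token, 0)  # 0 is SentencePiece's "unknown"
    return spm_id + FAIRSEQ_OFFSET if spm_id else UNK_ID


toy_spm = {",": 3, ".": 4, "▁": 5}  # toy piece table (assumption)
assert token_to_id("[CLS]", toy_spm) == 1
assert token_to_id(",", toy_spm) == 15  # 3 + 12, as the vocab-alignment comment states
assert token_to_id("never-seen", toy_spm) == UNK_ID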
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def a_ ( _lowerCAmelCase ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ) -> Optional[Any]: __lowerCamelCase : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 
'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> int: for i in range(config.num_hidden_layers ): __lowerCamelCase : str = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase : Tuple = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' ) __lowerCamelCase : Optional[int] = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] __lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] __lowerCamelCase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :] def a_ ( _lowerCAmelCase ) -> Optional[int]: __lowerCamelCase : Union[str, Any] = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(_lowerCAmelCase ,_lowerCAmelCase ) def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> List[str]: __lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase ) __lowerCamelCase : Dict = val @torch.no_grad() def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Tuple: __lowerCamelCase : Union[str, Any] = ViltConfig(image_size=384 ,patch_size=32 ,tie_word_embeddings=_lowerCAmelCase ) __lowerCamelCase : Any = False __lowerCamelCase : int = False __lowerCamelCase : Union[str, Any] = False __lowerCamelCase : Union[str, Any] = False if "vqa" in checkpoint_url: __lowerCamelCase : int = True __lowerCamelCase : Any = 3129 __lowerCamelCase : int = 'huggingface/label-files' __lowerCamelCase : Tuple = 'vqa2-id2label.json' __lowerCamelCase : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase ,_lowerCAmelCase ,repo_type='dataset' ) ,'r' ) ) __lowerCamelCase : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} __lowerCamelCase : str = idalabel __lowerCamelCase : int = {v: k for k, v in idalabel.items()} __lowerCamelCase : Optional[int] = ViltForQuestionAnswering(_lowerCAmelCase ) elif "nlvr" in checkpoint_url: __lowerCamelCase : Dict = True __lowerCamelCase : str = 2 __lowerCamelCase : Optional[Any] = {0: 'False', 1: 'True'} __lowerCamelCase : int = {v: k for k, v in config.idalabel.items()} __lowerCamelCase : Optional[int] = 3 __lowerCamelCase : List[str] = ViltForImagesAndTextClassification(_lowerCAmelCase ) elif "irtr" in checkpoint_url: __lowerCamelCase : int = True __lowerCamelCase : Optional[Any] = ViltForImageAndTextRetrieval(_lowerCAmelCase ) elif "mlm_itm" in checkpoint_url: __lowerCamelCase : Dict = True __lowerCamelCase : Optional[Any] = ViltForMaskedLM(_lowerCAmelCase ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys __lowerCamelCase : Tuple = torch.hub.load_state_dict_from_url(_lowerCAmelCase ,map_location='cpu' )['state_dict'] __lowerCamelCase : List[Any] = 
create_rename_keys(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase ,_lowerCAmelCase ) if mlm_model or irtr_model: __lowerCamelCase : Any = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(_lowerCAmelCase ,_lowerCAmelCase ) # load state dict into HuggingFace model model.eval() if mlm_model: __lowerCamelCase : Optional[Any] = model.load_state_dict(_lowerCAmelCase ,strict=_lowerCAmelCase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(_lowerCAmelCase ) # Define processor __lowerCamelCase : List[str] = ViltImageProcessor(size=384 ) __lowerCamelCase : List[str] = BertTokenizer.from_pretrained('bert-base-uncased' ) __lowerCamelCase : List[str] = ViltProcessor(_lowerCAmelCase ,_lowerCAmelCase ) # Forward pass on example inputs (image + text) if nlvr_model: __lowerCamelCase : Tuple = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' ,stream=_lowerCAmelCase ).raw ) __lowerCamelCase : List[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' ,stream=_lowerCAmelCase ).raw ) __lowerCamelCase : Union[str, Any] = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) __lowerCamelCase : Tuple = processor(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors='pt' ) __lowerCamelCase : List[Any] = processor(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors='pt' ) __lowerCamelCase : str = model( input_ids=encoding_a.input_ids ,pixel_values=encoding_a.pixel_values ,pixel_values_a=encoding_a.pixel_values ,) else: __lowerCamelCase : Dict = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' ,stream=_lowerCAmelCase ).raw ) if mlm_model: __lowerCamelCase : Optional[int] = 'a bunch of [MASK] laying on a [MASK].' else: __lowerCamelCase : Optional[Any] = 'How many cats are there?' 
__lowerCamelCase : Optional[Any] = processor(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors='pt' ) __lowerCamelCase : Optional[Any] = model(**_lowerCAmelCase ) # Verify outputs if mlm_model: __lowerCamelCase : int = torch.Size([1, 11, 30522] ) __lowerCamelCase : int = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] ,_lowerCAmelCase ,atol=1E-4 ) # verify masked token prediction equals "cats" __lowerCamelCase : Optional[Any] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: __lowerCamelCase : str = torch.Size([1, 3129] ) __lowerCamelCase : Dict = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] ,_lowerCAmelCase ,atol=1E-4 ) # verify vqa prediction equals "2" __lowerCamelCase : Optional[Any] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: __lowerCamelCase : Dict = torch.Size([1, 2] ) __lowerCamelCase : Optional[Any] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _UpperCamelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
208
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 _a = { '''return_dict''': False, '''output_hidden_states''': True, '''output_attentions''': True, '''torchscript''': True, '''torch_dtype''': '''float16''', '''use_bfloat16''': True, '''tf_legacy_loss''': True, '''pruned_heads''': {'''a''': 1}, '''tie_word_embeddings''': False, '''is_decoder''': True, '''cross_attention_hidden_size''': 1_2_8, '''add_cross_attention''': True, '''tie_encoder_decoder''': True, '''max_length''': 5_0, '''min_length''': 3, '''do_sample''': True, '''early_stopping''': True, '''num_beams''': 3, '''num_beam_groups''': 3, '''diversity_penalty''': 0.5, '''temperature''': 2.0, '''top_k''': 1_0, '''top_p''': 0.7, '''typical_p''': 0.2, '''repetition_penalty''': 0.8, '''length_penalty''': 0.8, '''no_repeat_ngram_size''': 5, '''encoder_no_repeat_ngram_size''': 5, '''bad_words_ids''': [1, 2, 3], '''num_return_sequences''': 3, '''chunk_size_feed_forward''': 5, '''output_scores''': True, '''return_dict_in_generate''': True, '''forced_bos_token_id''': 2, '''forced_eos_token_id''': 3, '''remove_invalid_values''': True, '''architectures''': ['''BertModel'''], '''finetuning_task''': '''translation''', '''id2label''': {0: '''label'''}, '''label2id''': {'''label''': '''0'''}, '''tokenizer_class''': '''BertTokenizerFast''', '''prefix''': '''prefix''', '''bos_token_id''': 6, '''pad_token_id''': 7, '''eos_token_id''': 8, '''sep_token_id''': 9, '''decoder_start_token_id''': 1_0, '''exponential_decay_length_penalty''': (5, 1.01), '''suppress_tokens''': [0, 1], '''begin_suppress_tokens''': 2, '''task_specific_params''': {'''translation''': '''some_params'''}, '''problem_type''': '''regression''', } @is_staging_test class A_ ( unittest.TestCase ): @classmethod def UpperCAmelCase ( cls : Dict ) -> List[str]: __lowerCAmelCase: str = TOKEN HfFolder.save_token(UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls : str ) -> List[Any]: try: delete_repo(token=cls._token , repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-config' ) except HTTPError: pass def UpperCAmelCase ( self : int ) -> Optional[int]: __lowerCAmelCase: Any = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('test-config' , use_auth_token=self._token ) __lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token ) __lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in 
config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) def UpperCAmelCase ( self : int ) -> Dict: __lowerCAmelCase: int = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token ) __lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token ) __lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: CustomConfig.register_for_auto_class() __lowerCAmelCase: Any = CustomConfig(attribute=4_2 ) config.push_to_hub('test-dynamic-config' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} ) __lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' ) self.assertEqual(new_config.attribute , 4_2 ) class A_ ( unittest.TestCase ): def UpperCAmelCase ( self : Union[str, Any] ) -> int: __lowerCAmelCase: List[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int __lowerCAmelCase: str = c.resid_pdrop + 1.0 # float __lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool __lowerCAmelCase: List[str] = c.summary_type + 'foo' # str c.update_from_string( F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' ) self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' ) self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' ) self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: __lowerCAmelCase: str = PretrainedConfig() __lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) __lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )] if len(UpperCAmelCase ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' F''' {', '.join(UpperCAmelCase )}.''' ) def UpperCAmelCase ( self : int ) -> Optional[Any]: with self.assertRaises(UpperCAmelCase ): # config is in subfolder, the following should not work without specifying the subfolder __lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) __lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: # A mock response for an HTTP head request to emulate server down __lowerCAmelCase: Union[str, Any] = mock.Mock() __lowerCAmelCase: str = 5_0_0 __lowerCAmelCase: Optional[Any] = {} __lowerCAmelCase: Optional[int] = HTTPError __lowerCAmelCase: List[Any] = {} # Download this model to make sure it's in the cache. __lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head: __lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase ( self : Any ) -> Optional[Any]: # This test is for deprecated behavior and can be removed in v5 __lowerCAmelCase: Tuple = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def UpperCAmelCase ( self : Dict ) -> str: __lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' ) __lowerCAmelCase: Optional[Any] = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(UpperCAmelCase ) __lowerCAmelCase: Tuple = 2 json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 __lowerCAmelCase: Dict = ['config.42.0.0.json'] __lowerCAmelCase: Optional[int] = 7_6_8 configuration.save_pretrained(UpperCAmelCase ) shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) ) __lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 7_6_8 ) def UpperCAmelCase ( self : Union[str, Any] ) -> Dict: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. 
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs' import transformers as new_transformers __lowerCAmelCase: List[Any] = 'v4.0.0' __lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained( UpperCAmelCase , return_unused_kwargs=UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(UpperCAmelCase , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __lowerCAmelCase: List[Any] = 'v3.0.0' __lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(old_configuration.hidden_size , 7_6_8 )
322
0
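The ViLT conversion script in the row above splits each fused attention projection into separate query, key, and value weights by slicing the first, middle, and last hidden_size rows. The numpy sketch below shows the same split on a tiny made-up matrix; the sizes are illustrative assumptions, not values from the checkpoint.

import numpy as np

hidden_size = 4  # illustrative assumption
in_proj_weight = np.arange(3 * hidden_size * hidden_size, dtype=np.float32).reshape(3 * hidden_size, hidden_size)
in_proj_bias = np.arange(3 * hidden_size, dtype=np.float32)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b, key_b, value_b = np.split(in_proj_bias, 3)

assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
# stacking the slices back recovers the original fused projection
assert np.array_equal(np.vstack([query_w, key_w, value_w]), in_proj_weight)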
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
303
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Returns the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Returns True if the chain starting at ``number`` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # every multiple of 10 shares the same digit-square chain, so cache those too
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Counts how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
322
0
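The Flax scheduler utilities in the row above implement the forward-diffusion step x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps through get_sqrt_alpha_prod and add_noise_common. The numpy sketch below replays that formula on a toy linear beta schedule; the schedule values are illustrative assumptions.

import numpy as np

betas = np.linspace(1e-4, 2e-2, 10)  # toy linear schedule (assumption)
alphas_cumprod = np.cumprod(1.0 - betas)


def add_noise(x0, noise, t):
    sqrt_alpha = np.sqrt(alphas_cumprod[t])
    sqrt_one_minus_alpha = np.sqrt(1.0 - alphas_cumprod[t])
    return sqrt_alpha * x0 + sqrt_one_minus_alpha * noise


x0 = np.ones(3)
# with zero noise, the noisy sample is just x0 scaled by sqrt(alpha_bar_t)
assert np.allclose(add_noise(x0, np.zeros(3), t=5), np.sqrt(alphas_cumprod[5]) * x0)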
"""simple docstring""" import os from pathlib import Path def _lowerCAmelCase ( ): from torch.utils.cpp_extension import load UpperCAmelCase = Path(lowercase_ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr' UpperCAmelCase = [ root / filename for filename in [ 'vision.cpp', os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ), os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ), ] ] load( 'MultiScaleDeformableAttention' , lowercase_ , with_cuda=lowercase_ , extra_include_paths=[str(lowercase_ )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[ '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
78
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of ``number`` ends in ``number`` itself (an automorphic number)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # compare the trailing digits of the number and its square, one digit at a time
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
322
0
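A quick usage sketch for the automorphic-number check defined above: a number is automorphic when its square ends in the number itself (5**2 = 25, 76**2 = 5776). It assumes is_automorphic_number is importable from that module and cross-checks it against a brute-force modular test.

candidates = [n for n in range(100) if n * n % 10 ** len(str(n)) == n]  # brute-force cross-check
assert candidates == [0, 1, 5, 6, 25, 76]
assert all(is_automorphic_number(n) == (n in candidates) for n in range(100))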
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A_ : def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict: __lowerCAmelCase: Optional[int] = parent __lowerCAmelCase: Dict = batch_size __lowerCAmelCase: Tuple = seq_length __lowerCAmelCase: Tuple = is_training __lowerCAmelCase: Optional[Any] = use_input_lengths __lowerCAmelCase: List[str] = use_token_type_ids __lowerCAmelCase: Dict = use_labels __lowerCAmelCase: int = gelu_activation __lowerCAmelCase: Optional[int] = sinusoidal_embeddings __lowerCAmelCase: Tuple = causal __lowerCAmelCase: Optional[Any] = asm __lowerCAmelCase: int = n_langs __lowerCAmelCase: Tuple = vocab_size __lowerCAmelCase: List[Any] = n_special __lowerCAmelCase: List[Any] = hidden_size __lowerCAmelCase: Union[str, Any] = num_hidden_layers __lowerCAmelCase: Dict = num_attention_heads __lowerCAmelCase: int = hidden_dropout_prob __lowerCAmelCase: List[str] = attention_probs_dropout_prob __lowerCAmelCase: Dict = max_position_embeddings __lowerCAmelCase: List[str] = type_sequence_label_size __lowerCAmelCase: str = initializer_range __lowerCAmelCase: List[str] = num_labels __lowerCAmelCase: List[str] = num_choices __lowerCAmelCase: Optional[int] = summary_type __lowerCAmelCase: Any = use_proj __lowerCAmelCase: Optional[Any] = scope __lowerCAmelCase: Dict = bos_token_id def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: __lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase: Any = None if self.use_input_lengths: __lowerCAmelCase: Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowerCAmelCase: str = None if self.use_token_type_ids: __lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowerCAmelCase: int = None __lowerCAmelCase: Optional[int] = None __lowerCAmelCase: Optional[int] = None if self.use_labels: __lowerCAmelCase: 
Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float() __lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase: Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self : Tuple ) -> List[Any]: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]: __lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase ) __lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase ) __lowerCAmelCase: List[str] = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int: __lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]: __lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: str = model(UpperCAmelCase ) __lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : 
Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple: __lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: List[str] = model(UpperCAmelCase ) __lowerCAmelCase: Union[str, Any] = model( UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , ) __lowerCAmelCase: Any = model( UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , ) ((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple() __lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase ) ((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]: __lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: List[Any] = model(UpperCAmelCase ) __lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]: __lowerCAmelCase: Union[str, Any] = self.num_labels __lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]: __lowerCAmelCase: List[Any] = self.num_choices __lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 
, self.num_choices , -1 ).contiguous() __lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase: Any = model( UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self : Tuple ) -> int: __lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): Union[str, Any] = config_and_inputs __lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): _lowercase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowercase : Any = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowercase : Optional[int] = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict: __lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowerCAmelCase: str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase ) return inputs_dict def UpperCAmelCase ( self : Union[str, Any] ) -> int: __lowerCAmelCase: int = XLMModelTester(self ) __lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 ) def UpperCAmelCase ( self : List[str] ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase ( self : Dict ) -> List[Any]: __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*UpperCAmelCase ) def UpperCAmelCase ( self : List[Any] ) -> int: __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> Tuple: __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Tuple: __lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> Tuple: __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase ) def UpperCAmelCase ( self : Any ) -> Any: __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase ) def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict: self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertListEqual( [isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) ) self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(UpperCAmelCase ): # adds PAD dummy token __lowerCAmelCase: int = min_length + idx + 1 __lowerCAmelCase: Union[str, Any] = min_length + idx + 1 __lowerCAmelCase: Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : 
Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]: self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertListEqual( [isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , ) self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(UpperCAmelCase ): # adds PAD dummy token __lowerCAmelCase: Any = min_length + idx + 1 __lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , ) pass @slow def UpperCAmelCase ( self : int ) -> Tuple: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_torch class A_ ( unittest.TestCase ): @slow def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: __lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(UpperCAmelCase ) __lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president __lowerCAmelCase: Union[str, Any] = [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
322
0
import os


def solution() -> int:
    """Find the greatest product of four adjacent numbers in the same
    direction (right, down, or either diagonal) in the 20x20 grid stored
    in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
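# A minimal self-check of the same four-direction scan, rewritten with
# direction vectors (an illustrative variant, not part of the original
# solution): on this 4x4 grid the best product of four adjacent numbers
# is the main diagonal, 1 * 2 * 3 * 4 = 24.
def _max_product_demo() -> int:
    small_grid = [
        [1, 0, 0, 0],
        [0, 2, 0, 0],
        [0, 0, 3, 0],
        [0, 0, 0, 4],
    ]
    best = 0
    for i in range(4):
        for j in range(4):
            # right, down, down-right diagonal, down-left diagonal
            for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
                if 0 <= i + 3 * di < 4 and 0 <= j + 3 * dj < 4:
                    product = 1
                    for step in range(4):
                        product *= small_grid[i + step * di][j + step * dj]
                    best = max(best, product)
    return best


assert _max_product_demo() == 24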
276
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer in
    O(n log n). Returns the sorted array and the number of inversions."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversions_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays and count the inversions that cross them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
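# A short usage sketch (illustrative, not part of the original module):
# [3, 1, 2] has exactly two inversions, (3, 1) and (3, 2), and both
# counters agree; the recursive version also returns the sorted array.
example = [3, 1, 2]
assert count_inversions_bf(example) == 2
sorted_example, num_inversions = count_inversions_recursive(example)
assert sorted_example == [1, 2, 3] and num_inversions == 2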
322
0
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) solution: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized O(n^2) solution: sort, then close in with two pointers."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
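# A small deterministic check (illustrative, not part of the original
# file): both implementations find the triplet of [1, 2, 3, 4, 5] that
# sums to 9, returned in sorted order.
assert triplet_sum1([1, 2, 3, 4, 5], 9) == (1, 3, 5)
assert triplet_sum2([1, 2, 3, 4, 5], 9) == (1, 3, 5)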
323
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
323
1
'''simple docstring''' import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''align_text_model''' def __init__( self : Tuple , lowerCamelCase_ : List[str]=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Union[str, Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : Optional[Any]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Union[str, Any]=5_12 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[str]=1e-12 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Optional[int]=True , **lowerCamelCase_ : Dict , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = vocab_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE : Any = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : str = hidden_act SCREAMING_SNAKE_CASE : Tuple = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Dict = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE : str = layer_norm_eps SCREAMING_SNAKE_CASE : Dict = position_embedding_type SCREAMING_SNAKE_CASE : int = use_cache SCREAMING_SNAKE_CASE : int = pad_token_id @classmethod def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ): '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''align_vision_model''' def __init__( self : int , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = num_channels SCREAMING_SNAKE_CASE : Any = image_size SCREAMING_SNAKE_CASE : List[str] = width_coefficient SCREAMING_SNAKE_CASE : List[Any] = depth_coefficient SCREAMING_SNAKE_CASE : List[Any] = depth_divisor SCREAMING_SNAKE_CASE : str = kernel_sizes SCREAMING_SNAKE_CASE : List[str] = in_channels SCREAMING_SNAKE_CASE : Dict = out_channels SCREAMING_SNAKE_CASE : Tuple = depthwise_padding SCREAMING_SNAKE_CASE : int = strides SCREAMING_SNAKE_CASE : Tuple = num_block_repeats SCREAMING_SNAKE_CASE : Tuple = expand_ratios SCREAMING_SNAKE_CASE : Any = squeeze_expansion_ratio SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : List[str] = hidden_dim SCREAMING_SNAKE_CASE : Tuple = pooling_type SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = batch_norm_eps SCREAMING_SNAKE_CASE : str = batch_norm_momentum SCREAMING_SNAKE_CASE : Optional[int] = drop_connect_rate SCREAMING_SNAKE_CASE : Optional[int] = sum(lowerCamelCase_ ) * 4 @classmethod def lowerCamelCase_ ( cls : Optional[int] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : str ): '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": SCREAMING_SNAKE_CASE : int = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''align''' SCREAMING_SNAKE_CASE__ = True def __init__( self : Any , lowerCamelCase_ : Any=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Tuple=6_40 , lowerCamelCase_ : Tuple=1.0 , lowerCamelCase_ : List[str]=0.02 , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) if text_config is None: SCREAMING_SNAKE_CASE : List[Any] = {} logger.info("""text_config is None. 
Initializing the AlignTextConfig with default values.""" ) if vision_config is None: SCREAMING_SNAKE_CASE : Optional[int] = {} logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE : int = AlignTextConfig(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = AlignVisionConfig(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = projection_dim SCREAMING_SNAKE_CASE : Optional[Any] = temperature_init_value SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range @classmethod def lowerCamelCase_ ( cls : Union[str, Any] , lowerCamelCase_ : AlignTextConfig , lowerCamelCase_ : AlignVisionConfig , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Tuple = self.text_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
323
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
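# A brief usage sketch (illustrative; assumes the restored TFGPT2Tokenizer
# name above, the keras-nlp / tensorflow-text dependencies, and network
# access to fetch the "gpt2" vocabulary):
tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world"]))
print(batch["input_ids"], batch["attention_mask"])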
323
1
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Solve x ≡ r_1 (mod n_1) and x ≡ r_2 (mod n_2) for coprime n_1, n_2."""
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b ≡ 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Same as chinese_remainder_theorem, but built on modular inverses."""
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
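# A worked example (illustrative, not part of the original file): the
# smallest non-negative x with x ≡ 1 (mod 5) and x ≡ 2 (mod 7) is 16,
# and both variants agree on it.
assert chinese_remainder_theorem(5, 1, 7, 2) == 16
assert chinese_remainder_theorem2(5, 1, 7, 2) == 16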
323
from abc import ABC, abstractmethod
from typing import Optional

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        pass
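# A minimal concrete reader (an illustrative assumption, not part of the
# library): real subclasses such as the CSV/JSON readers in datasets/io/
# follow this shape, implementing read() on top of a dataset builder.
class InMemoryListReader(AbstractDatasetReader):
    def __init__(self, rows, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self):
        # Dataset is imported at the top of this module.
        return Dataset.from_list(self.rows)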
323
1
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def __A ( lowerCamelCase_ ): """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(lowerCamelCase_ , lowerCamelCase_ ) if k.startswith("""encoder""" ): SCREAMING_SNAKE_CASE : int = k.replace(""".attn""" , """.self_attn""" ) SCREAMING_SNAKE_CASE : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" ) SCREAMING_SNAKE_CASE : Optional[int] = k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): SCREAMING_SNAKE_CASE : List[str] = k.replace("""norm1""" , """self_attn_layer_norm""" ) SCREAMING_SNAKE_CASE : int = k.replace("""norm2""" , """encoder_attn_layer_norm""" ) SCREAMING_SNAKE_CASE : Any = k.replace("""norm3""" , """final_layer_norm""" ) return k def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: SCREAMING_SNAKE_CASE : int = sd.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd SCREAMING_SNAKE_CASE : List[str] = v __UpperCAmelCase = ["""START"""] @torch.no_grad() def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = torch.load(lowerCamelCase_ , map_location="""cpu""" ) SCREAMING_SNAKE_CASE : Any = model["""model"""] SCREAMING_SNAKE_CASE : Tuple = BlenderbotConfig.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = BlenderbotForConditionalGeneration(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = m.model.state_dict().keys() SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue SCREAMING_SNAKE_CASE : Optional[Any] = rename_state_dict_key(lowerCamelCase_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: SCREAMING_SNAKE_CASE : Tuple = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowerCamelCase_ ) m.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) m.half() m.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) __UpperCAmelCase = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
323
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
323
1
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def __A ( lowerCamelCase_ , lowerCamelCase_=False ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : Tuple = """""" else: SCREAMING_SNAKE_CASE : Dict = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE : int = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Dict = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : 
] SCREAMING_SNAKE_CASE : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : Any = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[-config.hidden_size :] def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ViTMSNConfig() SCREAMING_SNAKE_CASE : Optional[Any] = 10_00 SCREAMING_SNAKE_CASE : Union[str, Any] = """datasets/huggingface/label-files""" SCREAMING_SNAKE_CASE : Dict = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ ) , """r""" ) ) SCREAMING_SNAKE_CASE : Any = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[int] = idalabel SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Any = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 15_36 SCREAMING_SNAKE_CASE : Optional[int] = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[Any] = 10_24 SCREAMING_SNAKE_CASE : Any = 40_96 SCREAMING_SNAKE_CASE : Optional[Any] = 24 SCREAMING_SNAKE_CASE : List[str] = 16 SCREAMING_SNAKE_CASE : List[Any] = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE : Dict = 7 SCREAMING_SNAKE_CASE : Tuple = 10_24 SCREAMING_SNAKE_CASE : Optional[Any] = 40_96 SCREAMING_SNAKE_CASE : List[str] = 24 SCREAMING_SNAKE_CASE : Tuple = 16 SCREAMING_SNAKE_CASE : Optional[int] = 0.1 SCREAMING_SNAKE_CASE : Any = ViTMSNModel(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" )["""target_encoder"""] SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = create_rename_keys(lowerCamelCase_ , base_model=lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , base_model=lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) SCREAMING_SNAKE_CASE : Union[str, Any] = ViTImageProcessor( size=config.image_size , image_mean=lowerCamelCase_ , 
image_std=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCamelCase_ , atol=1E-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __UpperCAmelCase = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
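# --- Illustrative sketch (not part of the conversion script above) ---
# read_in_q_k_v above splits timm's fused attention projection into separate
# query/key/value tensors by slicing thirds of the row dimension. A toy check
# of that slicing, with a made-up hidden size (ViT-S/16 really uses 384):
import torch

hidden_size = 4  # hypothetical toy value
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # q, k, v stacked row-wise
q_w = qkv_weight[:hidden_size, :]                    # first third  -> query
k_w = qkv_weight[hidden_size : 2 * hidden_size, :]   # middle third -> key
v_w = qkv_weight[-hidden_size:, :]                   # last third   -> value
assert torch.equal(torch.cat([q_w, k_w, v_w]), qkv_weight)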
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
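# --- Illustrative sketch (not part of the training script above) ---
# The accuracy metric above is just an element-wise mean over matching
# predictions; a toy check with made-up labels:
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75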
'''simple docstring''' import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = f'''{sampling_rate}''' SCREAMING_SNAKE_CASE : Union[str, Any] = """1""" SCREAMING_SNAKE_CASE : Optional[Any] = """f32le""" SCREAMING_SNAKE_CASE : Union[str, Any] = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE : List[Any] = ffmpeg_process.communicate(lowerCamelCase_ ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error SCREAMING_SNAKE_CASE : str = output_stream[0] SCREAMING_SNAKE_CASE : int = np.frombuffer(lowerCamelCase_ , np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = "f32le" , ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = f'''{sampling_rate}''' SCREAMING_SNAKE_CASE : Optional[int] = """1""" if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : Tuple = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Union[str, Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) SCREAMING_SNAKE_CASE : int = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE : List[str] = """alsa""" SCREAMING_SNAKE_CASE : int = """default""" elif system == "Darwin": SCREAMING_SNAKE_CASE : Union[str, Any] = """avfoundation""" SCREAMING_SNAKE_CASE : str = """:0""" elif system == "Windows": SCREAMING_SNAKE_CASE : Optional[int] = """dshow""" SCREAMING_SNAKE_CASE : int = """default""" SCREAMING_SNAKE_CASE : List[Any] = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE : Any = _ffmpeg_stream(lowerCamelCase_ , lowerCamelCase_ ) for item in iterator: yield item def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = "f32le" , ): """simple docstring""" if stream_chunk_s is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = stream_chunk_s else: SCREAMING_SNAKE_CASE : List[Any] = chunk_length_s SCREAMING_SNAKE_CASE : int = ffmpeg_microphone(lowerCamelCase_ , lowerCamelCase_ , format_for_conversion=lowerCamelCase_ ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : Optional[Any] = np.intaa SCREAMING_SNAKE_CASE : Dict = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Optional[Any] = np.floataa SCREAMING_SNAKE_CASE : str = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: SCREAMING_SNAKE_CASE : str = chunk_length_s / 6 SCREAMING_SNAKE_CASE : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase_ , (int, float) ): SCREAMING_SNAKE_CASE : Tuple = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE : Optional[int] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE : Any = datetime.datetime.now() SCREAMING_SNAKE_CASE : Optional[int] = datetime.timedelta(seconds=lowerCamelCase_ ) for item in chunk_bytes_iter(lowerCamelCase_ , lowerCamelCase_ , stride=(stride_left, stride_right) , stream=lowerCamelCase_ ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(item["""raw"""] , dtype=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) SCREAMING_SNAKE_CASE : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = B"""""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase_ ) < chunk_len: SCREAMING_SNAKE_CASE : Dict = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase_ ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right) SCREAMING_SNAKE_CASE : List[Any] = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: SCREAMING_SNAKE_CASE : Optional[Any] = False yield item SCREAMING_SNAKE_CASE : Optional[int] = stride_left SCREAMING_SNAKE_CASE : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase_ ) > stride_left: SCREAMING_SNAKE_CASE : Union[str, Any] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE : Tuple = False yield item def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase_ , stdout=subprocess.PIPE , bufsize=lowerCamelCase_ ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE : Optional[int] = ffmpeg_process.stdout.read(lowerCamelCase_ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
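# --- Illustrative sketch (not part of the audio utilities above) ---
# A self-contained miniature of the chunk-with-stride scheme implemented by
# chunk_bytes_iter above: each emitted window of chunk_len bytes overlaps its
# neighbours by (stride_left, stride_right) bytes, so a consumer can discard
# edge effects. Byte sizes here are toy values, not the real sample widths.
def chunks_with_stride(data: bytes, chunk_len: int, stride_left: int, stride_right: int):
    acc, left = b"", 0
    for i in range(0, len(data), 3):  # simulate small reads from a stream
        acc += data[i : i + 3]
        while len(acc) >= chunk_len:
            yield acc[:chunk_len], (left, stride_right)
            left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > left:  # flush the tail; nothing is strided on the right
        yield acc, (left, 0)

# Windows slide by chunk_len - stride_left - stride_right = 2 bytes here:
print(list(chunks_with_stride(bytes(range(10)), chunk_len=6, stride_left=2, stride_right=2)))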
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
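# --- Illustrative sketch (not part of the modules above) ---
# The heart of the vector quantizer above, in isolation: nearest-codebook
# lookup via torch.cdist, then the straight-through trick z + (z_q - z).detach(),
# which outputs the quantized vectors while routing gradients back to z.
# Toy sizes assumed.
import torch

codebook = torch.randn(8, 4)                  # n_e = 8 codes of dimension 4
z = torch.randn(5, 4, requires_grad=True)
idx = torch.cdist(z, codebook).argmin(dim=1)  # nearest code per input vector
z_q = z + (codebook[idx] - z).detach()        # forward: codebook[idx]; backward: grads to z
z_q.sum().backward()
assert torch.equal(z.grad, torch.ones_like(z))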
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
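# --- Illustrative sketch (not part of the solution above) ---
# A square lamina of outer width w with a centred square hole of width h uses
# t = w*w - h*h tiles, with w and h of equal parity and h >= 1; the solution
# above counts, for each t, how many (w, h) pairs produce it. For example,
# 8 tiles form exactly one lamina: a 3x3 square with a 1x1 hole.
assert 3 * 3 - 1 * 1 == 8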
'''simple docstring'''
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") __UpperCAmelCase = logging.getLogger(__name__) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) SCREAMING_SNAKE_CASE__ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''The input training data file (a text file).'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCamelCase_ ( self : str ): '''simple docstring''' if self.train_file is not None: SCREAMING_SNAKE_CASE : str = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: SCREAMING_SNAKE_CASE : str = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None def __call__( self : str , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = """label""" if """label""" in features[0].keys() else """labels""" SCREAMING_SNAKE_CASE : Tuple = [feature.pop(lowerCamelCase_ ) for feature in features] SCREAMING_SNAKE_CASE : Tuple = len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = len(features[0]["""input_ids"""] ) SCREAMING_SNAKE_CASE : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCamelCase_ )] for feature in features ] SCREAMING_SNAKE_CASE : int = list(chain(*lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : str = self.tokenizer.pad( lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) # Un-flatten SCREAMING_SNAKE_CASE : Optional[int] = {k: v.view(lowerCamelCase_ , lowerCamelCase_ , -1 ) for k, v in batch.items()} # Add back labels SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ , dtype=torch.intaa ) return batch def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_swag""" , lowerCamelCase_ , lowerCamelCase_ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Optional[Any] = training_args.get_process_log_level() logger.setLevel(lowerCamelCase_ ) datasets.utils.logging.set_verbosity(lowerCamelCase_ ) transformers.utils.logging.set_verbosity(lowerCamelCase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. SCREAMING_SNAKE_CASE : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: SCREAMING_SNAKE_CASE : Tuple = {} if data_args.train_file is not None: SCREAMING_SNAKE_CASE : Optional[Any] = data_args.train_file if data_args.validation_file is not None: SCREAMING_SNAKE_CASE : Any = data_args.validation_file SCREAMING_SNAKE_CASE : Optional[Any] = data_args.train_file.split(""".""" )[-1] SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset( lowerCamelCase_ , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. SCREAMING_SNAKE_CASE : Optional[int] = load_dataset( """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''ending{i}''' for i in range(4 )] SCREAMING_SNAKE_CASE : Dict = """sent1""" SCREAMING_SNAKE_CASE : List[Any] = """sent2""" if data_args.max_seq_length is None: SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value""" """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can""" """ override this default with `--block_size xxx`.""" ) SCREAMING_SNAKE_CASE : Any = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) SCREAMING_SNAKE_CASE : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = [[context] * 4 for context in examples[context_name]] SCREAMING_SNAKE_CASE : List[Any] = examples[question_header_name] SCREAMING_SNAKE_CASE : List[str] = [ [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase_ ) ] # Flatten out SCREAMING_SNAKE_CASE : Tuple = list(chain(*lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : int = list(chain(*lowerCamelCase_ ) ) # Tokenize SCREAMING_SNAKE_CASE : Optional[int] = tokenizer( lowerCamelCase_ , lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase_ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) SCREAMING_SNAKE_CASE : Optional[int] = raw_datasets["""train"""] if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE : List[str] = min(len(lowerCamelCase_ ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE : List[str] = train_dataset.select(range(lowerCamelCase_ ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): SCREAMING_SNAKE_CASE : Optional[int] = train_dataset.map( lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE : Any = min(len(lowerCamelCase_ ) , data_args.max_eval_samples ) SCREAMING_SNAKE_CASE : List[Any] = eval_dataset.select(range(lowerCamelCase_ ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): SCREAMING_SNAKE_CASE : List[str] = eval_dataset.map( lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator SCREAMING_SNAKE_CASE : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = eval_predictions SCREAMING_SNAKE_CASE : Any = np.argmax(lowerCamelCase_ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer SCREAMING_SNAKE_CASE : Tuple = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE : int = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE : List[str] = last_checkpoint SCREAMING_SNAKE_CASE : Any = trainer.train(resume_from_checkpoint=lowerCamelCase_ ) trainer.save_model() # Saves the tokenizer too for easy upload SCREAMING_SNAKE_CASE : Dict = train_result.metrics 
SCREAMING_SNAKE_CASE : int = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : str = min(lowerCamelCase_ , len(lowerCamelCase_ ) ) trainer.log_metrics("""train""" , lowerCamelCase_ ) trainer.save_metrics("""train""" , lowerCamelCase_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[int] = trainer.evaluate() SCREAMING_SNAKE_CASE : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = min(lowerCamelCase_ , len(lowerCamelCase_ ) ) trainer.log_metrics("""eval""" , lowerCamelCase_ ) trainer.save_metrics("""eval""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """multiple-choice""", """dataset_tags""": """swag""", """dataset_args""": """regular""", """dataset""": """SWAG""", """language""": """en""", } if training_args.push_to_hub: trainer.push_to_hub(**lowerCamelCase_ ) else: trainer.create_model_card(**lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
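# --- Illustrative sketch (not part of run_swag above) ---
# preprocess_function above flattens the 4 endings of every example before
# tokenizing, then regroups them in blocks of 4; the regrouping step on a
# toy list of strings:
flat = ["a0", "a1", "a2", "a3", "b0", "b1", "b2", "b3"]  # 2 examples x 4 endings
grouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
assert grouped == [["a0", "a1", "a2", "a3"], ["b0", "b1", "b2", "b3"]]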
'''simple docstring'''
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 3 SCREAMING_SNAKE_CASE : Any = 2_50 SCREAMING_SNAKE_CASE : Dict = ids_tensor((batch_size, length) , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones((batch_size, length) , device=lowerCamelCase_ , dtype=torch.float ) / length return input_ids, scores def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_tensors(5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self._get_tensors(9 ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = MaxLengthCriteria(max_length=10 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self._get_tensors(5 ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self._get_tensors(9 ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self._get_tensors(9 ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self._get_tensors(10 ) self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Dict = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self._get_tensors(5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCamelCase_ , lowerCamelCase_ ) ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) SCREAMING_SNAKE_CASE : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) 
self.assertEqual(len(lowerCamelCase_ ) , 1 )
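# --- Illustrative sketch (not part of the tests above) ---
# What the length-based tests above exercise, reduced to its essence: a
# criterion fires once the generated sequence reaches max_length. This is a
# simplified stand-in, not transformers' actual MaxLengthCriteria class.
import torch

class ToyMaxLengthCriteria:
    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> bool:
        return input_ids.shape[-1] >= self.max_length

criteria = ToyMaxLengthCriteria(max_length=10)
assert not criteria(torch.ones(3, 5, dtype=torch.long), torch.ones(3, 250))
assert criteria(torch.ones(3, 10, dtype=torch.long), torch.ones(3, 250))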
'''Tests for the EfficientFormer image processor, which is backed by ViTImageProcessor.'''

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
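# A short, self-contained usage sketch of the processor exercised by the tests
# above. It assumes only that `transformers` and a torch backend are installed;
# the size and normalization values mirror the tester defaults and are
# otherwise arbitrary.
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(
    size={"height": 18, "width": 18}, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]
)
image = np.zeros((32, 32, 3), dtype=np.uint8)  # stand-in for a real photo
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])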
'''Lazy import structure for the LongT5 model family.'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
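# `_LazyModule` above defers the heavy torch/flax imports until a symbol is
# first touched. A minimal sketch of the same idea using only the standard
# library; the mapping of public names to submodules follows the dict layout
# above, and any concrete package layout is hypothetical.
import importlib
import types


class LazyModule(types.ModuleType):
    """Load submodules on first attribute access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        try:
            module_name = self._class_to_module[name]
        except KeyError:
            raise AttributeError(name) from None
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, name)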
'''Lazy import structure for the `transformers.onnx` export utilities.'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
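# The symbols above back the `python -m transformers.onnx` export entry point.
# A typical invocation (shell, not Python) looks like the following; the model
# name and output directory are only examples:
#
#   python -m transformers.onnx --model=distilbert-base-uncased onnx/
#
# `FeaturesManager` resolves which task heads ("features") a given architecture
# supports, and `validate_model_outputs` compares ONNX Runtime outputs against
# the original PyTorch model within an absolute tolerance.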
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE : List[str] = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE : Optional[Any] = output.images SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) sd_pipe.set_scheduler("""sample_euler""" ) SCREAMING_SNAKE_CASE : Optional[Any] = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) SCREAMING_SNAKE_CASE : Tuple = output.images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) SCREAMING_SNAKE_CASE : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) SCREAMING_SNAKE_CASE : Optional[int] = """A painting of a squirrel eating a burger""" SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Any = output.images SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE : Dict = np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
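# A minimal end-to-end sketch of the pipeline exercised by the tests above. It
# assumes a CUDA device and the `diffusers` + `k-diffusion` packages; the
# checkpoint and scheduler names are taken directly from the tests.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")  # any sampler name k-diffusion exposes

generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=15,
).images[0]
image.save("squirrel.png")  # a PIL image by default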
'''Deprecated import shim for the ControlNet pipelines.'''

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from"
    " diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
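# The shim above only keeps the old import path alive; per the deprecation
# message, new code should import from the package root instead:
from diffusers import StableDiffusionControlNetPipeline  # noqa: F401

# `MultiControlNetModel` likewise now lives under `diffusers.pipelines.controlnet`.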
'''Lazy import structure for the Autoformer model.'''

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
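# Quick sanity check of the lazy exports above; `prediction_length` is one of
# Autoformer's core time-series settings and the value here is arbitrary.
from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24)
print(config.model_type)  # "autoformer"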
'''Bit manipulation helpers: set, clear, flip, and test a single bit of an integer.'''


def set_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` set to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` cleared to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` toggled."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return 1 if the bit at ``position`` is set, else 0."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
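# Worked example for the helpers above, using 0b1010 (decimal 10):
if __name__ == "__main__":
    number = 0b1010
    assert set_bit(number, 0) == 0b1011  # 11: lowest bit turned on
    assert clear_bit(number, 1) == 0b1000  # 8: bit 1 turned off
    assert flip_bit(number, 3) == 0b0010  # 2: bit 3 toggled off
    assert is_bit_set(number, 1) is True
    assert get_bit(number, 0) == 0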
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __UpperCAmelCase = """bart""" __UpperCAmelCase = True @st.cache(allow_output_mutation=lowerCamelCase_ ) def __A ( ): """simple docstring""" if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" ) SCREAMING_SNAKE_CASE : List[Any] = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" ) SCREAMING_SNAKE_CASE : Tuple = qar_model.eval() else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = (None, None) if MODEL_TYPE == "bart": SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" ) SCREAMING_SNAKE_CASE : List[str] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" ) SCREAMING_SNAKE_CASE : Tuple = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" ) sas_model.load_state_dict(save_dict["""model"""] ) SCREAMING_SNAKE_CASE : str = sas_model.eval() else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = make_qa_sas_model( model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=lowerCamelCase_ ) def __A ( ): """simple docstring""" if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE : int = faiss.StandardGpuResources() SCREAMING_SNAKE_CASE : str = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""] SCREAMING_SNAKE_CASE : Tuple = np.memmap( """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 1_28) , ) SCREAMING_SNAKE_CASE : Union[str, Any] = faiss.IndexFlatIP(1_28 ) SCREAMING_SNAKE_CASE : Union[str, Any] = faiss.index_cpu_to_gpu(lowerCamelCase_ , 1 , lowerCamelCase_ ) wikiaab_gpu_index_flat.add(lowerCamelCase_ ) # TODO fix for larger GPU else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = (None, None) SCREAMING_SNAKE_CASE : List[Any] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" ) SCREAMING_SNAKE_CASE : Optional[Any] = elia["""train_eli5"""] SCREAMING_SNAKE_CASE : Any = np.memmap( """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = faiss.IndexFlatIP(1_28 ) eli5_train_q_index.add(lowerCamelCase_ ) return (elia_train, eli5_train_q_index) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = load_indexes() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = load_models() __UpperCAmelCase , __UpperCAmelCase = load_train_data() def __A ( lowerCamelCase_ , lowerCamelCase_=10 ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = embed_questions_for_retrieval([question] , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = eli5_train_q_index.search(lowerCamelCase_ , lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Optional[int] = [elia_train[int(lowerCamelCase_ )] for i in I[0]] return nn_examples def __A ( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ): """simple docstring""" if source == "none": SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), []) else: if method == "dense": SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = query_qa_dense_index( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = query_es_index( lowerCamelCase_ , lowerCamelCase_ , index_name="""english_wiki40b_snippets_100w""" , n_results=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = [ (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst ] SCREAMING_SNAKE_CASE : List[Any] = """question: {} context: {}""".format(lowerCamelCase_ , lowerCamelCase_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda lowerCamelCase_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCamelCase_ : None), } ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=64 , lowerCamelCase_=2_56 , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=0.95 , lowerCamelCase_=0.8 ): """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = qa_sas_generate( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , num_answers=1 , num_beams=lowerCamelCase_ , min_len=lowerCamelCase_ , max_len=lowerCamelCase_ , do_sample=lowerCamelCase_ , temp=lowerCamelCase_ , top_p=lowerCamelCase_ , top_k=lowerCamelCase_ , max_input_length=10_24 , device="""cuda:0""" , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar __UpperCAmelCase = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" __UpperCAmelCase = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __UpperCAmelCase = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. 
""" st.sidebar.markdown(description, unsafe_allow_html=True) __UpperCAmelCase = [ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] __UpperCAmelCase = st.sidebar.checkbox("""Demo options""") if demo_options: __UpperCAmelCase = st.sidebar.selectbox( """""", action_list, index=3, ) __UpperCAmelCase = action_list.index(action_st) __UpperCAmelCase = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) __UpperCAmelCase = show_type == """Show full text of passages""" else: __UpperCAmelCase = 3 __UpperCAmelCase = True __UpperCAmelCase = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: __UpperCAmelCase = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) __UpperCAmelCase = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) __UpperCAmelCase = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: __UpperCAmelCase = """wiki40b""" __UpperCAmelCase = """dense""" __UpperCAmelCase = """beam""" __UpperCAmelCase = 2 __UpperCAmelCase = 64 __UpperCAmelCase = 256 __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = st.sidebar.checkbox("""Generation options""") if generate_options: __UpperCAmelCase = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. 
""" st.sidebar.markdown(generate_info) __UpperCAmelCase = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) __UpperCAmelCase = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __UpperCAmelCase = st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __UpperCAmelCase = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __UpperCAmelCase = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __UpperCAmelCase = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __UpperCAmelCase = None # start main text __UpperCAmelCase = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] __UpperCAmelCase = st.selectbox( """What would you like to ask? 
---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": __UpperCAmelCase = st.text_input("""Enter your question here:""", """""") else: __UpperCAmelCase = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": __UpperCAmelCase , __UpperCAmelCase = make_support(question, source=wiki_source, method="""dense""", n_results=10) __UpperCAmelCase , __UpperCAmelCase = make_support(question, source=wiki_source, method="""sparse""", n_results=10) __UpperCAmelCase = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __UpperCAmelCase = support_list[:10] __UpperCAmelCase = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: __UpperCAmelCase , __UpperCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __UpperCAmelCase , __UpperCAmelCase = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): __UpperCAmelCase = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) __UpperCAmelCase = res[1].strip() if sec_titles == "": __UpperCAmelCase = """[{}]({})""".format(res[0], wiki_url) else: __UpperCAmelCase = sec_titles.split(""" & """) __UpperCAmelCase = """ & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: __UpperCAmelCase = find_nearest_training(question) __UpperCAmelCase = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) __UpperCAmelCase = [ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) __UpperCAmelCase = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
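# This file is a Streamlit app, not a plain script: the `st.*` calls above only
# render when served by Streamlit. Assuming the file is saved as `eli5_app.py`
# (a hypothetical name) and the index/model files it references exist locally,
# it is launched from a shell with:
#
#   streamlit run eli5_app.py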
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
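# A compact sketch of the generation path the `tooslow` test above exercises;
# it mirrors the test verbatim and assumes enough host memory to load the 6B
# checkpoint.
import jax
from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
model.config.do_sample = False  # greedy decoding, as in the test

inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
jit_generate = jax.jit(model.generate)  # compile once, reuse on later calls
sequences = jit_generate(
    inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))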
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_="." ): SCREAMING_SNAKE_CASE : Union[str, Any] = [] for k, v in d.items(): SCREAMING_SNAKE_CASE : List[str] = parent_key + sep + k if parent_key else k if isinstance(lowerCamelCase_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(lowerCamelCase_ , lowerCamelCase_ , sep=lowerCamelCase_ ).items() ) else: items.append((new_key, v) ) return dict(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = argparse.Namespace() with open(lowerCamelCase_ , """r""" ) as yaml_file: try: SCREAMING_SNAKE_CASE : Dict = yaml.load(lowerCamelCase_ , Loader=yaml.FullLoader ) SCREAMING_SNAKE_CASE : str = flatten_yaml_as_dict(lowerCamelCase_ ) for k, v in flat_cfg.items(): setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(lowerCamelCase_ , str(lowerCamelCase_ ) ) ) return config def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTVaConfig() SCREAMING_SNAKE_CASE : List[str] = False # dataset if task_name.startswith("""imagenet1k_""" ): SCREAMING_SNAKE_CASE : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: SCREAMING_SNAKE_CASE : Any = 3_84 else: SCREAMING_SNAKE_CASE : Union[str, Any] = 2_56 SCREAMING_SNAKE_CASE : Union[str, Any] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): SCREAMING_SNAKE_CASE : Union[str, Any] = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: SCREAMING_SNAKE_CASE : Dict = 3_84 else: SCREAMING_SNAKE_CASE : Any = 2_56 SCREAMING_SNAKE_CASE : Dict = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): SCREAMING_SNAKE_CASE : Optional[int] = 1_51 SCREAMING_SNAKE_CASE : int = 5_12 SCREAMING_SNAKE_CASE : Union[str, Any] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Dict = True elif task_name.startswith("""voc_""" ): SCREAMING_SNAKE_CASE : str = 21 SCREAMING_SNAKE_CASE : int = 5_12 SCREAMING_SNAKE_CASE : List[str] = """pascal-voc-id2label.json""" SCREAMING_SNAKE_CASE : Any = True # orig_config SCREAMING_SNAKE_CASE : Union[str, Any] = load_orig_config_file(lowerCamelCase_ ) assert getattr(lowerCamelCase_ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(lowerCamelCase_ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(lowerCamelCase_ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCamelCase_ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCamelCase_ , """model.segmentation.output_stride""" , 16 
) if "_deeplabv3" in task_name: SCREAMING_SNAKE_CASE : Optional[Any] = getattr(lowerCamelCase_ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) SCREAMING_SNAKE_CASE : str = getattr(lowerCamelCase_ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowerCamelCase_ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label SCREAMING_SNAKE_CASE : str = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : Tuple = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Tuple = idalabel SCREAMING_SNAKE_CASE : Any = {v: k for k, v in idalabel.items()} return config def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = val def __A ( lowerCamelCase_ , lowerCamelCase_=False ): """simple docstring""" if base_model: SCREAMING_SNAKE_CASE : int = """""" else: SCREAMING_SNAKE_CASE : Union[str, Any] = """mobilevitv2.""" SCREAMING_SNAKE_CASE : List[Any] = [] for k in state_dict.keys(): if k[:8] == "encoder.": SCREAMING_SNAKE_CASE : str = k[8:] else: SCREAMING_SNAKE_CASE : Union[str, Any] = k if ".block." in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: SCREAMING_SNAKE_CASE : Any = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: SCREAMING_SNAKE_CASE : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: SCREAMING_SNAKE_CASE : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: SCREAMING_SNAKE_CASE : List[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: SCREAMING_SNAKE_CASE : str = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: SCREAMING_SNAKE_CASE : int = [0, 1] elif i == 4: SCREAMING_SNAKE_CASE : Tuple = [0, 1, 2, 3] elif i == 5: SCREAMING_SNAKE_CASE : Optional[int] = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: SCREAMING_SNAKE_CASE : Any = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: SCREAMING_SNAKE_CASE : Tuple = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." 
in k: SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: SCREAMING_SNAKE_CASE : List[str] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: SCREAMING_SNAKE_CASE : str = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." in k: SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(lowerCamelCase_ ) for k in keys_to_ignore: state_dict.pop(lowerCamelCase_ , lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) return im @torch.no_grad() def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_mobilevitva_config(lowerCamelCase_ , lowerCamelCase_ ) # load original state_dict SCREAMING_SNAKE_CASE : Any = torch.load(lowerCamelCase_ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): SCREAMING_SNAKE_CASE : int = MobileViTVaForSemanticSegmentation(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : List[Any] = False else: SCREAMING_SNAKE_CASE : int = MobileViTVaForImageClassification(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Union[str, Any] = False # remove and rename some keys of load the original model SCREAMING_SNAKE_CASE : List[str] = checkpoint remove_unused_keys(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = create_rename_keys(lowerCamelCase_ , base_model=lowerCamelCase_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # load modified state_dict model.load_state_dict(lowerCamelCase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE : Any = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(**lowerCamelCase_ ) # verify classification model if task_name.startswith("""imagenet""" ): SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits SCREAMING_SNAKE_CASE : Dict = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits 
for base variant SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""imagenet1k_256""", type=str, help=( """Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """ """ Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 """ ), choices=[ """imagenet1k_256""", """imagenet1k_384""", """imagenet21k_to_1k_256""", """imagenet21k_to_1k_384""", """ade20k_deeplabv3""", """voc_deeplabv3""", ], ) parser.add_argument( """--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) __UpperCAmelCase = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
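# Example invocation of the conversion script above; the paths are placeholders
# and the script filename is hypothetical, while the flags match the argparse
# definition in the file:
#
#   python convert_mobilevitv2_original_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf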
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim SCREAMING_SNAKE_CASE : Tuple = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : str = ( nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : Any = GPTaConfig( vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ): '''simple 
docstring'''
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        '''simple docstring'''
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        '''simple docstring'''
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        '''simple docstring'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
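The ranking step at the end of generate_beam is easiest to see in isolation. Below is a minimal, self-contained sketch (hypothetical tensors, not part of the class above) of that length-normalized re-ranking: each beam's cumulative log-probability is divided by its length, and beams are returned best-first.

import torch

# Hypothetical values for three finished beams (illustration only).
scores = torch.tensor([-2.0, -4.0, -4.5])    # cumulative log-probabilities
seq_lengths = torch.tensor([2.0, 8.0, 5.0])  # generated lengths of those beams
normalized = scores / seq_lengths            # average log-probability per token
order = normalized.argsort(descending=True)  # best beam first
print(order.tolist())                        # [1, 2, 0]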
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
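As a quick check, a hedged sketch (assuming the module above is importable from an installed transformers tree, and assuming default construction is valid) showing that instantiating the deprecated class emits the FutureWarning:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = BeitFeatureExtractor()  # assumption: default kwargs suffice here
    assert any(issubclass(w.category, FutureWarning) for w in caught)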
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please"
    " import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
'''simple docstring''' from manim import * class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 ) SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i, rect in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 ) target.move_to(lowerCamelCase_ ) model_arr.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(lowerCamelCase_ ) self.add(*lowerCamelCase_ , *lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 ) SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) disk.move_to([-4, -1.25, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = 
MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 ) input.set_fill(lowerCamelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 ) self.play(Write(lowerCamelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 ) self.play(MoveToTarget(lowerCamelCase_ ) ) self.play(FadeOut(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) SCREAMING_SNAKE_CASE : Optional[int] = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02} self.play( Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) SCREAMING_SNAKE_CASE : Optional[int] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) SCREAMING_SNAKE_CASE : Any = AnimationGroup( FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(lowerCamelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: SCREAMING_SNAKE_CASE : Optional[Any] = 0.7 self.play( Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) SCREAMING_SNAKE_CASE : 
Union[str, Any] = a_c SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , ) SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) ) self.wait()
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
'''simple docstring'''
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    try:
        print(g.shortest_path("Foo"))
    except ValueError as error:
        print(error)  # "Foo" is not in the graph, so the lookup raises
'''simple docstring''' import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 __UpperCAmelCase = get_tests_dir("""fixtures/dummy-config.json""") class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = 0 def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowerCamelCase_ , """fake-roberta""" ) os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_ , """config.json""" ) , """w""" ) as f: f.write(json.dumps({} ) ) SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertEqual(type(lowerCamelCase_ ) , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register("""custom""" , lowerCamelCase_ ) # Wrong model type will raise an error with self.assertRaises(lowerCamelCase_ ): AutoConfig.register("""model""" , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoConfig.register("""bert""" , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE : List[Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def lowerCamelCase_ ( self : int ): '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ): SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def lowerCamelCase_ ( self : int ): '''simple docstring''' with self.assertRaises(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" ) def lowerCamelCase_ ( self : str ): '''simple docstring''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''new-model''' try: AutoConfig.register("""new-model""" , lowerCamelCase_ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
'''simple docstring'''
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        '''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        '''simple docstring'''
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
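A small standalone sketch of the two heuristic choices used above (same HEURISTIC convention; the points are hypothetical):

from math import sqrt

dx, dy = 4 - 0, 3 - 0             # displacement between (0, 0) and (3, 4) in (x, y)
manhattan = abs(dx) + abs(dy)     # 7, used when HEURISTIC == 1
euclidean = sqrt(dx**2 + dy**2)   # 5.0, used when HEURISTIC == 0
print(manhattan, euclidean)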
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __UpperCAmelCase = 250004 __UpperCAmelCase = 250020 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MBartTokenizer SCREAMING_SNAKE_CASE__ = MBartTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : List[str] = MBartTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = MBartTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCamelCase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE : List[str] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Any = tokenizer_r.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Any = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''facebook/mbart-large-en-ro''' SCREAMING_SNAKE_CASE__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] SCREAMING_SNAKE_CASE__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu 
există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] SCREAMING_SNAKE_CASE__ = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def lowerCamelCase_ ( cls : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) SCREAMING_SNAKE_CASE : Optional[Any] = 1 return cls def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids ) SCREAMING_SNAKE_CASE : List[Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 10 SCREAMING_SNAKE_CASE : int = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Any = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = MBartTokenizer.from_pretrained(lowerCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ ) @require_torch def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , 
truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : str = targets["""input_ids"""] SCREAMING_SNAKE_CASE : Optional[Any] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(lowerCamelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[62, 30_34, 2, 25_00_04]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_00_01, } , )
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5
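A brief usage sketch (assumes a transformers version that ships this config class; the attribute names follow the definitions above):

from transformers import EfficientNetConfig

config = EfficientNetConfig()       # defaults above correspond to EfficientNet-B7
print(config.image_size)            # 600
print(config.num_hidden_layers)     # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64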
'''simple docstring'''
# flake8: noqa
# Lint as: python3

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : List[Any] = {} if prompt is not None: SCREAMING_SNAKE_CASE : List[Any] = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ): '''simple docstring''' return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE : Optional[Any] = None return model_inputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): SCREAMING_SNAKE_CASE : List[str] = None if generate_kwargs is None: SCREAMING_SNAKE_CASE : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE : List[Any] = { """generated_text""": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __UpperCAmelCase = """pt""" elif is_tf_available(): __UpperCAmelCase = """tf""" else: __UpperCAmelCase = """jax""" class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = PerceiverTokenizer SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : str ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE : str = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase_ ( self : int ): '''simple docstring''' return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" ) def lowerCamelCase_ ( self : Optional[int] , **lowerCamelCase_ : List[Any] ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int=False , lowerCamelCase_ : Tuple=20 , lowerCamelCase_ : List[Any]=5 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(len(lowerCamelCase_ ) ): try: SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) SCREAMING_SNAKE_CASE : Optional[int] = list(filter(lambda lowerCamelCase_ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase_ ) , lowerCamelCase_ ) ) if max_length is not None and len(lowerCamelCase_ ) > max_length: SCREAMING_SNAKE_CASE : Tuple = toks[:max_length] if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0: while len(lowerCamelCase_ ) < min_length: SCREAMING_SNAKE_CASE : Dict = toks + toks # toks_str = [t[1] for t in toks] SCREAMING_SNAKE_CASE : Optional[Any] = [t[0] for t in toks] # Ensure consistency SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) if " " not in output_txt and len(lowerCamelCase_ ) > 1: SCREAMING_SNAKE_CASE : Union[str, Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_ ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_ ) ) if with_prefix_space: SCREAMING_SNAKE_CASE : Optional[int] = """ """ + output_txt SCREAMING_SNAKE_CASE : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) return output_txt, output_ids def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.perceiver_tokenizer SCREAMING_SNAKE_CASE : List[Any] = """Unicode €.""" SCREAMING_SNAKE_CASE : Any = tokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5] self.assertEqual(encoded["""input_ids"""] , lowerCamelCase_ ) # decoding SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , """[CLS]Unicode €.[SEP]""" ) SCREAMING_SNAKE_CASE : Tuple = tokenizer("""e è é ê ë""" ) SCREAMING_SNAKE_CASE : Optional[Any] = [4, 1_07, 38, 
2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5] self.assertEqual(encoded["""input_ids"""] , lowerCamelCase_ ) # decoding SCREAMING_SNAKE_CASE : str = tokenizer.decode(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , """[CLS]e è é ê ë[SEP]""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.perceiver_tokenizer SCREAMING_SNAKE_CASE : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off SCREAMING_SNAKE_CASE : Optional[int] = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0] # fmt: on SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) if FRAMEWORK != "jax": SCREAMING_SNAKE_CASE : Union[str, Any] = list(batch.input_ids.numpy()[0] ) else: SCREAMING_SNAKE_CASE : str = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer SCREAMING_SNAKE_CASE : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] SCREAMING_SNAKE_CASE : str = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , lowerCamelCase_ ) self.assertIn("""attention_mask""" , lowerCamelCase_ ) self.assertNotIn("""decoder_input_ids""" , lowerCamelCase_ ) self.assertNotIn("""decoder_attention_mask""" , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.perceiver_tokenizer SCREAMING_SNAKE_CASE : List[str] = [ """Summary of the text.""", """Another summary.""", ] SCREAMING_SNAKE_CASE : Optional[int] = tokenizer( text_target=lowerCamelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test SCREAMING_SNAKE_CASE : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Optional[Any] = """ He is very happy, UNwant\u00E9d,running""" SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) tokenizer.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 
after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) shutil.rmtree(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Optional[int] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) SCREAMING_SNAKE_CASE : Dict = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) tokenizer.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: SCREAMING_SNAKE_CASE : Tuple = json.load(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(1_25 )] SCREAMING_SNAKE_CASE : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] SCREAMING_SNAKE_CASE : List[Any] = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(lowerCamelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(lowerCamelCase_ , lowerCamelCase_ ) with open(os.path.join(lowerCamelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(lowerCamelCase_ , lowerCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files SCREAMING_SNAKE_CASE : int = tokenizer_class.from_pretrained( lowerCamelCase_ , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained SCREAMING_SNAKE_CASE : Any = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCamelCase_ )] SCREAMING_SNAKE_CASE : str = tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_78] ) , """�""" ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' pass def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE : Any = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""] SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_string(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
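# Round-trip sketch distilled from the assertions above (hedged: only the happy
# path; the ids shown are exactly the ones the test asserts):
#
#   tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   ids = tok("Unicode €.").input_ids   # [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
#   tok.decode(ids)                     # "[CLS]Unicode €.[SEP]"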
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
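# Minimal sampling sketch distilled from the loop the tests above exercise (hedged:
# `model` stands in for any consistency-model UNet; shapes and the step count are
# illustrative, but the scheduler calls are the ones the test itself uses):
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       scaled = scheduler.scale_model_input(sample, t)               # 1. scale model input
#       model_output = model(scaled, t)                               # 2. predict noise residual
#       sample = scheduler.step(model_output, t, sample).prev_sample  # 3. previous sample x_t-1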
"""Pure-Python binary search utilities: iterative, stdlib-based, and recursive."""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    # Leftmost index at which `item` can be inserted while keeping the list sorted.
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    # Rightmost index at which `item` can be inserted while keeping the list sorted.
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
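# Quick illustration of the bisect_left / bisect_right distinction on duplicates
# (throwaway example values):
#
#   bisect_left([1, 2, 2, 2, 3], 2)    # -> 1 (insertion point before the run of 2s)
#   bisect_right([1, 2, 2, 2, 3], 2)   # -> 4 (insertion point after the run of 2s)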
"""Dataset reader that builds a `Dataset` from plain-text files (one example per line)."""
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
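# Usage sketch for the reader above (hedged: "corpus.txt" is a placeholder path;
# the Text builder yields one {"text": ...} example per line):
#
#   ds = TextDatasetReader("corpus.txt", split="train").read()
#   ds[0]   # {"text": "<first line of the file>"}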
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
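# Example invocation (mirrors the argparse definitions above; the script filename
# is a hypothetical placeholder):
#
#   python convert_upernet_swin.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub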
"""Entry point for the `datasets-cli` command-line tool."""
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    # Turn leftover ["--key", "value", ...] pairs into a {"key": "value", ...} dict.
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
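# With the `datasets-cli` console entry point installed, the commands registered
# above are dispatched through main(), e.g.:
#
#   $ datasets-cli env
#   $ datasets-cli test --help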
"""In-graph GPT-2 tokenizer implemented as a Keras layer on top of keras-nlp."""
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
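# In-graph tokenization sketch (hedged: assumes the "gpt2" checkpoint is available;
# the layer call is the one defined above):
#
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tf_tokenizer(tf.constant(["hello world"]))
#   batch["input_ids"], batch["attention_mask"]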
"""Project Euler: sum the fractions 1/x + 1/y = n-th power combinations with rational roots."""
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    # True if `number` is a perfect square.
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    # Add three fractions x + y + z and reduce the result to lowest terms.
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
"""Abstract base classes for the `datasets` reader API."""
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 16 ): """simple docstring""" SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("""bert-base-cased""" ) SCREAMING_SNAKE_CASE : List[str] = DatasetDict( { """train""": dataset["""train"""].select(lowerCamelCase_ ), """validation""": dataset["""train"""].select(lowerCamelCase_ ), """test""": dataset["""validation"""], } ) def tokenize_function(lowerCamelCase_ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE : int = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE : Tuple = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE : int = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE : str = 8 else: SCREAMING_SNAKE_CASE : Optional[Any] = None return tokenizer.pad( lowerCamelCase_ , padding="""longest""" , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = DataLoader( tokenized_datasets["""test"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader, test_dataloader def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [] # Download the dataset SCREAMING_SNAKE_CASE : Optional[int] = load_dataset("""glue""" , """mrpc""" ) # Create our splits SCREAMING_SNAKE_CASE : List[str] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator SCREAMING_SNAKE_CASE : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE : List[str] = config["""lr"""] SCREAMING_SNAKE_CASE : Optional[int] = int(config["""num_epochs"""] ) SCREAMING_SNAKE_CASE : Dict = int(config["""seed"""] ) SCREAMING_SNAKE_CASE : List[Any] = int(config["""batch_size"""] ) SCREAMING_SNAKE_CASE : Dict = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE : Dict = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE : int = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase_ ) # New Code # # Create our folds: SCREAMING_SNAKE_CASE : int = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) SCREAMING_SNAKE_CASE : int = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = get_fold_dataloaders( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE : List[str] = AdamW(params=model.parameters() , lr=lowerCamelCase_ ) # Instantiate scheduler SCREAMING_SNAKE_CASE : List[str] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Now we train the model for epoch in range(lowerCamelCase_ ): model.train() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) SCREAMING_SNAKE_CASE : str = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.loss SCREAMING_SNAKE_CASE : List[str] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : int = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCamelCase_ , references=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowerCamelCase_ ) # New Code # # We also run predictions on the test set at the very end SCREAMING_SNAKE_CASE : List[str] = [] for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Union[str, Any] = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCamelCase_ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: SCREAMING_SNAKE_CASE : int = torch.cat(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = torch.stack(lowerCamelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=lowerCamelCase_ , references=lowerCamelCase_ ) accelerator.print("""Average test metrics from all folds:""" , lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=lowerCamelCase_ , default=3 , help="""The number of splits to perform across the dataset""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
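# Example launches, following the accelerate examples README referenced in the
# header comment (hedged: the script filename is illustrative):
#
#   accelerate launch cross_validation.py
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16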
"""Tests for the Bark processor."""

import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing an already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading a voice preset from an npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading a voice preset from the hub
        processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
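For orientation, a brief usage sketch of the processor exercised by these tests. The checkpoint and preset names come from the test fixtures above, and the printed keys are indicative rather than guaranteed:

# Illustrative sketch only: tokenize a prompt with a voice preset, as the tests do.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(sorted(inputs.keys()))  # e.g. ["attention_mask", "history_prompt", "input_ids"]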
"""Runtime dependency version checks."""

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
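As a quick illustration of the helper defined at the bottom of this module: `require_version` accepts a pip-style specifier plus an optional hint that is appended to the error message. A minimal sketch:

# Check an installed package against a version specifier.
# `require_version` returns silently on success and raises on a mismatch.
from transformers.utils.versions import require_version

require_version("numpy>=1.17", "numpy is needed for array operations")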
"""Finetuning the library models for multiple choice tasks."""

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
from copy import deepcopy


class FenwickTree:
    """Binary indexed (Fenwick) tree supporting point updates and prefix-sum queries."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from `arr` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum over the half-open prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum over the half-open range [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the element at `index` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index whose inclusive prefix sum is <= `value`, or -1; O(log n)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
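A short usage sketch for the class above; the method names follow this reconstruction of the dataset's placeholder identifiers, and the expected values can be checked against the O(n) build by hand:

# Build a Fenwick tree over a small array and exercise the main operations.
tree = FenwickTree(arr=[1, 2, 3, 4, 5])
assert tree.prefix(3) == 1 + 2 + 3        # sum over indices 0..2
assert tree.query(1, 4) == 2 + 3 + 4      # half-open range [1, 4)
tree.add(2, 10)                           # point update: arr[2] += 10
assert tree.get(2) == 13
assert tree.get_array() == [1, 2, 13, 4, 5]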
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE. Avoids costly matrix multiplications and
    allows for post-hoc remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifies (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure the sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
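For context, a minimal sketch of how `DiagonalGaussianDistribution` is typically driven by a VAE encoder head; the shapes are illustrative assumptions, not taken from this file:

# Split an 8-channel parameter tensor into mean/logvar (4 + 4 channels)
# and draw a reparameterized sample, as a VAE encoder head would.
import torch

params = torch.randn(2, 8, 32, 32)  # (batch, 2 * latent_channels, h, w)
dist = DiagonalGaussianDistribution(params)
latents = dist.sample()             # shape (2, 4, 32, 32)
kl = dist.kl()                      # per-sample KL to a standard normal, shape (2,)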
"""Ideal-gas-law helpers (R = 0.0821 L·atm·K⁻¹·mol⁻¹) plus a normality helper."""


def normality(moles: float, volume: float, nfactor: float) -> int:
    """Normality = molarity * n-factor = (moles / volume) * nfactor."""
    return round(float(moles / volume) * nfactor)


def pressure_of_gas(moles: float, temperature: float, volume: float) -> int:
    """P = nRT / V (ideal gas law)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas(moles: float, temperature: float, pressure: float) -> int:
    """V = nRT / P (ideal gas law)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas(pressure: float, volume: float, moles: float) -> int:
    """T = PV / nR (ideal gas law)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
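A quick sanity check of the helpers above, using one mole at 273 K and 22.4 L so the ideal-gas numbers come out near round values (the function names follow this reconstruction; each helper rounds to the nearest integer):

assert pressure_of_gas(moles=1, temperature=273, volume=22.4) == 1
assert volume_of_gas(moles=1, temperature=273, pressure=1) == 22   # 22.41 L rounds to 22
assert temperature_of_gas(pressure=1, volume=22.4, moles=1) == 273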
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different betas, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different betas, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
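As a companion to the test harness above, a minimal, hedged sketch of driving `PNDMScheduler` directly; a random tensor stands in for a real UNet's noise prediction:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # a real pipeline would call its UNet here
    sample = scheduler.step(model_output, t, sample).prev_sample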
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # 64x64 is the width/height of the dataset images and 3 is the RGB channel count;
    # (3, 3) is the kernel size (filter matrix).
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load trained model weights
    # from keras.models import load_model
    # regressor = load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly; the older `fit_generator` alias is deprecated.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
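# A minimal sketch, under the same assumptions as the script above (a saved
# "cnn.h5" and an image at "dataset/single_prediction/image.png"), showing how the
# trained classifier can be reloaded for inference in a separate process.
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("cnn.h5")
img = tf.keras.preprocessing.image.load_img(
    "dataset/single_prediction/image.png", target_size=(64, 64)
)
# Rescale to [0, 1] to match the training-time ImageDataGenerator(rescale=1/255).
x = np.expand_dims(tf.keras.preprocessing.image.img_to_array(img), axis=0) / 255.0
# The sigmoid output is a probability; threshold at 0.5 for a binary label.
print("Abnormality detected" if model.predict(x)[0][0] > 0.5 else "Normal")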
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
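# A small usage sketch of what the lazy structure above buys, assuming transformers
# is installed: `import transformers` stays cheap, and the ONNX submodules are only
# imported on first attribute access. The names below are the exports it declares.
from transformers.onnx import EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig

# Size threshold (in bytes) above which ONNX export falls back to external data.
print(EXTERNAL_DATA_FORMAT_SIZE_LIMIT)
print(OnnxConfig)  # resolved lazily from .config on this first access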
'''simple docstring''' # Imports import numpy as np class UpperCamelCase__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : int=None ): '''simple docstring''' self.set_matricies(red=lowerCamelCase_ , green=lowerCamelCase_ , blue=lowerCamelCase_ , red_edge=lowerCamelCase_ , nir=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : List[Any]=None ): '''simple docstring''' if red is not None: SCREAMING_SNAKE_CASE : List[Any] = red if green is not None: SCREAMING_SNAKE_CASE : Tuple = green if blue is not None: SCREAMING_SNAKE_CASE : Optional[int] = blue if red_edge is not None: SCREAMING_SNAKE_CASE : Optional[Any] = red_edge if nir is not None: SCREAMING_SNAKE_CASE : List[Any] = nir return True def lowerCamelCase_ ( self : int , lowerCamelCase_ : int="" , lowerCamelCase_ : str=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=None ): '''simple docstring''' self.set_matricies(red=lowerCamelCase_ , green=lowerCamelCase_ , blue=lowerCamelCase_ , red_edge=lowerCamelCase_ , nir=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """ARVI2""": self.arvaa, """CCCI""": self.ccci, """CVI""": self.cvi, """GLI""": self.gli, """NDVI""": self.ndvi, """BNDVI""": self.bndvi, """redEdgeNDVI""": self.red_edge_ndvi, """GNDVI""": self.gndvi, """GBNDVI""": self.gbndvi, """GRNDVI""": self.grndvi, """RBNDVI""": self.rbndvi, """PNDVI""": self.pndvi, """ATSAVI""": self.atsavi, """BWDRVI""": self.bwdrvi, """CIgreen""": self.ci_green, """CIrededge""": self.ci_rededge, """CI""": self.ci, """CTVI""": self.ctvi, """GDVI""": self.gdvi, """EVI""": self.evi, """GEMI""": self.gemi, """GOSAVI""": self.gosavi, """GSAVI""": self.gsavi, """Hue""": self.hue, """IVI""": self.ivi, """IPVI""": self.ipvi, """I""": self.i, """RVI""": self.rvi, """MRVI""": self.mrvi, """MSAVI""": self.m_savi, """NormG""": self.norm_g, """NormNIR""": self.norm_nir, """NormR""": self.norm_r, """NGRDI""": self.ngrdi, """RI""": self.ri, """S""": self.s, """IF""": self._if, """DVI""": self.dvi, """TVI""": self.tvi, """NDRE""": self.ndre, } try: return funcs[index]() except KeyError: print("""Index not in the list!""" ) return False def lowerCamelCase_ ( self : int ): '''simple docstring''' return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.nir * (self.red / (self.green**2)) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' return (self.nir - self.red) / (self.nir + self.red) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return (self.nir - self.blue) / (self.nir + self.blue) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return (self.redEdge - self.red) / (self.redEdge + self.red) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return (self.nir - self.green) / (self.nir + 
self.green) def lowerCamelCase_ ( self : Any ): '''simple docstring''' return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Any=0.08 , lowerCamelCase_ : Optional[int]=1.22 , lowerCamelCase_ : List[str]=0.03 ): '''simple docstring''' return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return (self.nir / self.green) - 1 def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return (self.nir / self.redEdge) - 1 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return (self.red - self.blue) / self.red def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return self.nir - self.green def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any]=0.16 ): '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green + y) def lowerCamelCase_ ( self : str , lowerCamelCase_ : int=0.5 ): '''simple docstring''' return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=None ): '''simple docstring''' return (self.nir - b) / (a * self.red) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return (self.red + self.green + self.blue) / 30.5 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.nir / self.red def lowerCamelCase_ ( self : int ): '''simple docstring''' return (self.rvi() - 1) / (self.rvi() + 1) def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return self.green / (self.nir + self.red + self.green) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.nir / (self.nir + self.red + self.green) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return 
self.red / (self.nir + self.red + self.green) def lowerCamelCase_ ( self : str ): '''simple docstring''' return (self.green - self.red) / (self.green + self.red) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return (self.red - self.green) / (self.red + self.green) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) SCREAMING_SNAKE_CASE : Optional[int] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.nir / self.red def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return (self.ndvi() + 0.5) ** (1 / 2) def lowerCamelCase_ ( self : str ): '''simple docstring''' return (self.nir - self.redEdge) / (self.nir + self.redEdge)
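# A self-contained sketch of the most common index computed by the class above.
# NDVI = (NIR - Red) / (NIR + Red); values near 1 indicate dense, healthy vegetation.
import numpy as np

nir = np.array([[0.8, 0.7], [0.6, 0.2]])
red = np.array([[0.1, 0.2], [0.3, 0.4]])
ndvi = (nir - red) / (nir + red)
print(ndvi)  # elementwise index per pixel, each value in [-1, 1]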
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401

deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated."
    " Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
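# A sketch of the migration the shim above asks for: the old path still resolves
# (with a deprecation warning) until 0.22.0, while new code uses the top-level import.
from diffusers import StableDiffusionControlNetPipeline  # preferred

# Deprecated, warns via the shim until removal:
# from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import (
#     StableDiffusionControlNetPipeline,
# )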
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Any=13 , lowerCamelCase_ : Any=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : str=[10, 20, 30, 40] , lowerCamelCase_ : List[Any]=[2, 2, 3, 2] , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Tuple=37 , lowerCamelCase_ : List[str]="gelu" , lowerCamelCase_ : Optional[Any]=10 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Any=["stage2", "stage3", "stage4"] , lowerCamelCase_ : List[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_stages SCREAMING_SNAKE_CASE : str = hidden_sizes SCREAMING_SNAKE_CASE : List[str] = depths SCREAMING_SNAKE_CASE : Union[str, Any] = is_training SCREAMING_SNAKE_CASE : Tuple = use_labels SCREAMING_SNAKE_CASE : List[str] = intermediate_size SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : List[str] = num_labels SCREAMING_SNAKE_CASE : Any = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = out_features SCREAMING_SNAKE_CASE : str = out_indices SCREAMING_SNAKE_CASE : str = scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Any = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE : List[Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = ConvNextVaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( 
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = ConvNextVaForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = ConvNextVaBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : int = ConvNextVaBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE : str = {"""pixel_values""": pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = config_and_inputs SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ConvNextVaModelTester(self ) SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_with_labels() SCREAMING_SNAKE_CASE : int = True if model_class.__name__ in [ *get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ ), ]: continue SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = model(**lowerCamelCase_ ).loss loss.backward() def lowerCamelCase_ ( self : int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_with_labels() SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[str] = True if ( model_class.__name__ in [*get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ )] or not model_class.supports_gradient_checkpointing ): continue SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.gradient_checkpointing_enable() model.train() SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = model(**lowerCamelCase_ ).loss loss.backward() def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = model_class(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ): SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) 
SCREAMING_SNAKE_CASE : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE : int = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Tuple = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[int] = ConvNextVaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase_ ( self : int ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.default_image_processor SCREAMING_SNAKE_CASE : Optional[int] = prepare_img() SCREAMING_SNAKE_CASE : Union[str, Any] = preprocessor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = model(**lowerCamelCase_ ) # verify the logits SCREAMING_SNAKE_CASE : Any = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
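# A minimal inference sketch mirroring the integration test above. It assumes the
# public "facebook/convnextv2-tiny-1k-224" checkpoint, network access, and the COCO
# fixture image the test uses.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

ckpt = "facebook/convnextv2-tiny-1k-224"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = ConvNextV2ForImageClassification.from_pretrained(ckpt)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
# Map the highest-scoring of the 1000 ImageNet classes back to a label string.
print(model.config.id2label[logits.argmax(-1).item()])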
def set_bit(number: int, position: int) -> int:
    """Set (to 1) the bit of `number` at `position`."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Clear (to 0) the bit of `number` at `position`."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Toggle the bit of `number` at `position`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit of `number` at `position` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return 1 if the bit of `number` at `position` is set, else 0."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
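# Quick checks for the helpers above (0b1010 == 10), runnable as-is:
assert set_bit(0b1010, 0) == 0b1011
assert clear_bit(0b1010, 1) == 0b1000
assert flip_bit(0b1010, 3) == 0b0010
assert is_bit_set(0b1010, 3) is True
assert get_bit(0b1010, 0) == 0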
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : List[Any] = {} if prompt is not None: SCREAMING_SNAKE_CASE : List[Any] = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ): '''simple docstring''' return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE : Optional[Any] = None return model_inputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): SCREAMING_SNAKE_CASE : List[str] = None if generate_kwargs is None: SCREAMING_SNAKE_CASE : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE : List[Any] = { """generated_text""": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
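# A minimal usage sketch for the pipeline above. "ydshieh/vit-gpt2-coco-en" is an
# illustrative public image-captioning checkpoint; the `prompt` argument is only
# honored by conditional models such as GIT or Pix2Struct, as the code above shows.
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
result = captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
print(result)  # e.g. [{'generated_text': ...}] (wording varies by checkpoint)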
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
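# A minimal generation sketch mirroring the slow test above. "EleutherAI/gpt-j-6B"
# is roughly 24 GB of weights, so treat this as illustrative rather than something
# to run casually.
from transformers import AutoTokenizer, FlaxGPTJForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")

inputs = tokenizer("Hello, my name is", return_tensors="np")
# Greedy decoding; the output object carries the token ids in `.sequences`.
output_ids = model.generate(inputs["input_ids"], max_length=32).sequences
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))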
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : PriorTransformer , lowerCamelCase_ : CLIPVisionModel , lowerCamelCase_ : CLIPImageProcessor , lowerCamelCase_ : HeunDiscreteScheduler , lowerCamelCase_ : ShapERenderer , ): '''simple docstring''' super().__init__() self.register_modules( prior=lowerCamelCase_ , image_encoder=lowerCamelCase_ , image_processor=lowerCamelCase_ , scheduler=lowerCamelCase_ , renderer=lowerCamelCase_ , ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Optional[Any] = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Tuple = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : int = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : str = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(lowerCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device 
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , ): '''simple docstring''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(image[0] , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = torch.cat(lowerCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCamelCase_ , axis=0 ) if not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[str] = image.to(dtype=self.image_encoder.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.image_encoder(lowerCamelCase_ )["""last_hidden_state"""] SCREAMING_SNAKE_CASE : List[str] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 SCREAMING_SNAKE_CASE : List[str] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : Dict = torch.zeros_like(lowerCamelCase_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE : str = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Any , lowerCamelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 25 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 64 , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : int = 1 elif isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = image.shape[0] elif isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): SCREAMING_SNAKE_CASE : int = len(lowerCamelCase_ ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCamelCase_ )}''' ) SCREAMING_SNAKE_CASE : List[str] = self._execution_device SCREAMING_SNAKE_CASE : List[str] = batch_size * num_images_per_prompt SCREAMING_SNAKE_CASE : Union[str, Any] = guidance_scale > 1.0 SCREAMING_SNAKE_CASE : List[Any] = self._encode_image(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # prior self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : List[str] = self.prior.config.num_embeddings SCREAMING_SNAKE_CASE : str = self.prior.config.embedding_dim SCREAMING_SNAKE_CASE : str = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim SCREAMING_SNAKE_CASE : int = latents.reshape(latents.shape[0] , lowerCamelCase_ , lowerCamelCase_ ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : 
Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : str = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.prior( lowerCamelCase_ , timestep=lowerCamelCase_ , proj_embedding=lowerCamelCase_ , ).predicted_image_embedding # remove the variance SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.step( lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = [] for i, latent in enumerate(lowerCamelCase_ ): print() SCREAMING_SNAKE_CASE : List[str] = self.renderer.decode( latent[None, :] , lowerCamelCase_ , size=lowerCamelCase_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , ) images.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = torch.stack(lowerCamelCase_ ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) SCREAMING_SNAKE_CASE : str = images.cpu().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Optional[int] = [self.numpy_to_pil(lowerCamelCase_ ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=lowerCamelCase_ )
from typing import Optional

import numpy as np
import torch
from torch import nn

from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 text decoder that autoregressively generates captions from a prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per prefix embedding in `features`."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the CLIP feature space
            # Only beam search is supported for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Stopped beams contribute no further log-probability mass
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
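# Minimal usage sketch, not part of the original module: caption random "CLIP-like"
# prefix embeddings with a tiny, untrained decoder. All sizes and the GPT-2 EOS id
# (50256) are illustrative assumptions; stacked outputs assume beams run to full length.
import torch


def _demo_generate_captions():
    decoder = UniDiffuserTextDecoder(
        prefix_length=4, prefix_inner_dim=32, n_positions=128, n_embd=32, n_layer=2, n_head=2
    )
    clip_feats = torch.randn(2, 4, 32)  # (batch, prefix_length, prefix_inner_dim)
    tokens, lengths = decoder.generate_captions(clip_feats, eos_token_id=50256, device="cpu")
    print(tokens.shape, lengths)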
323
1
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
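# Illustrative `utils/documentation_tests.txt` contents the checker accepts: every
# entry must exist on disk and the list must be alphabetically sorted, e.g.
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py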
323
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
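# Usage sketch, not part of the original file: GIT configs compose like other
# Hugging Face configs; the layer counts below are illustrative.
from transformers import GitConfig

cfg = GitConfig(num_hidden_layers=2, vision_config={"num_hidden_layers": 2})
print(cfg.vision_config.num_hidden_layers)  # -> 2
print(cfg.to_dict()["model_type"])  # -> "git"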
323
1
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
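# Usage sketch with illustrative variable names: reading typical distributed-training
# environment variables through these helpers.
import os

os.environ["WORLD_SIZE"] = "4"
print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1))  # -> 4
print(parse_flag_from_env("ACCELERATE_DEBUG_MODE", default=False))  # -> False
print(parse_choice_from_env("ACCELERATE_MIXED_PRECISION", default="no"))  # -> "no"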
323
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a_c),
            FadeOut(a, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
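# Rendering sketch, assuming the scene lives in a file named stage_5.py: with the
# manim community CLI installed, a low-quality preview render would be
#   manim -pql stage_5.py Stage5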
323
1
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
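# Usage sketch via the standard pipeline entry point; the checkpoint, image URL,
# and threshold are illustrative.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
print(detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9))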
323
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = graph # mapping node to its parent in resulting breadth first tree SCREAMING_SNAKE_CASE : dict[str, str | None] = {} SCREAMING_SNAKE_CASE : List[str] = source_vertex def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex} SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue while queue: SCREAMING_SNAKE_CASE : str = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = vertex queue.append(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ ) if target_vertex_parent is None: SCREAMING_SNAKE_CASE : Tuple = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(lowerCamelCase_ ) return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}''' if __name__ == "__main__": __UpperCAmelCase = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
323
1
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Take a product name as argument and return product information in a DataFrame
    scraped from the Amazon search results page.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = "₹" + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Reset price fields so a failed scrape does not carry stale values forward
        product_price = " "
        product_mrp = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
323
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
323
1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
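# Running these tests (the file path is an assumption about repo layout): the fast
# tests run on CPU, while the integration tests need a CUDA GPU and slow tests enabled.
#   pytest tests/pipelines/stable_unclip/test_stable_unclip_img2img.py -m "not slow"
#   RUN_SLOW=1 pytest tests/pipelines/stable_unclip/test_stable_unclip_img2img.py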
323
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
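# Usage sketch, not part of the original file: instantiating the config directly
# from transformers; the coefficients are illustrative (roughly a b0-sized model).
from transformers import EfficientNetConfig

cfg = EfficientNetConfig(image_size=224, width_coefficient=1.0, depth_coefficient=1.0)
print(cfg.num_hidden_layers)  # 4 * sum(num_block_repeats) = 64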
323
1
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
323
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `input_ids = None` in `preprocess` when `prompt=None`; in batch mode the
        # pipeline groups them into a list of `None`, which would fail `generate`, so normalize first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
323
1
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters
    arranged differently (ignoring case and whitespace).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
323
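The same check can be written more compactly with collections.Counter, which compares the two character multisets directly; a minimal equivalent sketch:

from collections import Counter


def are_anagrams(first: str, second: str) -> bool:
    # Normalize case and drop whitespace, as above, then compare multisets.
    first = first.lower().replace(" ", "")
    second = second.lower().replace(" ", "")
    return Counter(first) == Counter(second)


assert are_anagrams("Silent", "Listen")
assert not are_anagrams("abc", "abd")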
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
323
1
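What the full-loop tests above exercise, reduced to a standalone sampling sketch: scale the input, query the model, step the scheduler. The zero model output is a stand-in assumption for a trained consistency model.

import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(
    num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(scaled)  # stand-in for model(scaled, t)
    sample = scheduler.step(model_output, t, sample).prev_sample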
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
323
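The diagonal-Gaussian posterior class above boils down to two formulas: a reparameterized sample and a closed-form KL divergence against a standard normal. A standalone sketch of both (the function name is mine):

import torch

def sample_and_kl(parameters: torch.Tensor):
    # Split channel-wise into mean and log-variance, clamp for stability.
    mean, logvar = torch.chunk(parameters, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    z = mean + std * torch.randn_like(std)  # reparameterization trick
    # KL(N(mean, var) || N(0, 1)), summed over non-batch dims.
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return z, kl

z, kl = sample_and_kl(torch.randn(2, 8, 4, 4))  # 8 channels -> 4 mean + 4 logvar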
'''simple docstring''' from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ): '''simple docstring''' super().__init__( lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE : Optional[int] = Text( cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[str] = None self.builder.download_and_prepare( download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE : int = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory ) return dataset
323
1
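This reader is what backs the packaged "text" loader in datasets; a minimal usage sketch (the file name is an illustrative assumption):

from datasets import load_dataset

# Each line of the input file becomes one example with a single "text" column.
dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(dataset[0]["text"])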
'''simple docstring'''
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """
    Returns a list of all primes up to num, using the sieve of Eratosthenes.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    >>> prime_sieve(2)
    [2]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
323
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
323
1
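The conversion script above rests on one small pattern: pop a key from the checkpoint's state dict and reinsert its tensor under the new name. A generic, self-contained sketch of that pattern (the keys are illustrative):

import torch

state_dict = {"backbone.patch_embed.norm.weight": torch.ones(3)}
rename_keys = [
    ("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"),
]

# Pop under the old name, reinsert under the new one; values are untouched.
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)

print(sorted(state_dict))  # ['backbone.embeddings.norm.weight']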
'''simple docstring''' from __future__ import annotations from typing import Any class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = num_of_nodes SCREAMING_SNAKE_CASE : list[list[int]] = [] SCREAMING_SNAKE_CASE : dict[int, int] = {} def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: SCREAMING_SNAKE_CASE : str = self.find_component(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : list[int] , lowerCamelCase_ : int , lowerCamelCase_ : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: SCREAMING_SNAKE_CASE : Any = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase_ ) elif component_size[u_node] >= component_size[v_node]: SCREAMING_SNAKE_CASE : str = self.find_component(lowerCamelCase_ ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = 0 SCREAMING_SNAKE_CASE : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) SCREAMING_SNAKE_CASE : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = edge SCREAMING_SNAKE_CASE : Any = self.m_component[u] SCREAMING_SNAKE_CASE : Optional[Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): SCREAMING_SNAKE_CASE : Optional[Any] = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = edge SCREAMING_SNAKE_CASE : Optional[Any] = self.m_component[u] SCREAMING_SNAKE_CASE : Dict = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' ) num_of_components -= 1 SCREAMING_SNAKE_CASE : List[Any] = [-1] * self.m_num_of_nodes print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' ) def __A ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
323
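Borůvka's algorithm above tracks components by repeatedly finding and merging them. A standalone union-find sketch of that primitive, with path compression and union by size (the names are mine, not the class's):

def find(parent, u):
    # Walk to the root, halving the path as we go.
    while parent[u] != u:
        parent[u] = parent[parent[u]]  # path compression
        u = parent[u]
    return u

def union(parent, size, u, v):
    ru, rv = find(parent, u), find(parent, v)
    if ru == rv:
        return False  # already in the same component
    if size[ru] < size[rv]:
        ru, rv = rv, ru  # attach the smaller tree under the larger
    parent[rv] = ru
    size[ru] += size[rv]
    return True

parent = list(range(5))
size = [1] * 5
union(parent, size, 0, 1)
print(find(parent, 1) == find(parent, 0))  # True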
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Any = pad_token_id SCREAMING_SNAKE_CASE : List[Any] = max_length SCREAMING_SNAKE_CASE : Optional[int] = vocab SCREAMING_SNAKE_CASE : List[Any] = merges SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab() return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ): '''simple docstring''' return cls(**lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ ) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs( lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
323
1
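A usage sketch for the in-graph tokenizer layer above (this matches the shape of transformers' TFGPT2Tokenizer; the checkpoint name is an assumption):

import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["hello world"]))
print(outputs["input_ids"])       # token ids produced inside the TF graph
print(outputs["attention_mask"])  # all ones here, since no padding is configured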
'''simple docstring''' import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = logging.get_logger() # the current default level is logging.WARNING SCREAMING_SNAKE_CASE : Optional[int] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = logging.get_verbosity() SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) SCREAMING_SNAKE_CASE : str = """Testing 1, 2, 3""" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCamelCase_ ) as cl: logger.warning(lowerCamelCase_ ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCamelCase_ ) as cl: logger.warning(lowerCamelCase_ ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCamelCase_ ) as cl: logger.warning(lowerCamelCase_ ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level logging.set_verbosity(lowerCamelCase_ ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var SCREAMING_SNAKE_CASE : int = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.getenv("""TRANSFORMERS_VERBOSITY""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = logging.log_levels[env_level_str] SCREAMING_SNAKE_CASE : Dict = logging.get_verbosity() self.assertEqual( lowerCamelCase_ , lowerCamelCase_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , ) # restore to the original level SCREAMING_SNAKE_CASE : int = """""" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() SCREAMING_SNAKE_CASE : List[Any] = logging.logging.getLogger() with CaptureLogger(lowerCamelCase_ ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def lowerCamelCase_ ( self : str ): '''simple docstring''' 
transformers.utils.logging._reset_library_root_logger() SCREAMING_SNAKE_CASE : str = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) SCREAMING_SNAKE_CASE : Dict = """Testing 1, 2, 3""" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCamelCase_ ) as cl: logger.warning_advice(lowerCamelCase_ ) self.assertEqual(cl.out , """""" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCamelCase_ ) as cl: logger.warning_advice(lowerCamelCase_ ) self.assertEqual(cl.out , msg + """\n""" ) def __A ( ): """simple docstring""" disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
323
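What those tests exercise, in miniature, is the library-wide verbosity switch that applies to every logger under the transformers.* namespace:

from transformers.utils import logging

logging.set_verbosity_error()  # silence warnings for all transformers.* loggers
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("this is suppressed")

logging.set_verbosity_warning()  # warnings show again
logger.warning("this is printed")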
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train""" SCREAMING_SNAKE_CASE : List[Any] = features SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir SCREAMING_SNAKE_CASE : Dict = keep_in_memory SCREAMING_SNAKE_CASE : Union[str, Any] = streaming SCREAMING_SNAKE_CASE : Optional[int] = num_proc SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs @abstractmethod def lowerCamelCase_ ( self : Any ): '''simple docstring''' pass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = features SCREAMING_SNAKE_CASE : int = cache_dir SCREAMING_SNAKE_CASE : Dict = keep_in_memory SCREAMING_SNAKE_CASE : Tuple = streaming SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs @abstractmethod def lowerCamelCase_ ( self : Dict ): '''simple docstring''' pass
323
1
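The abstract base above fixes a simple contract: the constructor stores the read options, and a single abstract method builds and returns the dataset. A minimal sketch of that shape with illustrative names:

from abc import ABC, abstractmethod


class DatasetReader(ABC):
    def __init__(self, path_or_paths, split=None, streaming=False):
        self.path_or_paths = path_or_paths
        self.split = split or "train"
        self.streaming = streaming

    @abstractmethod
    def read(self):
        """Return a Dataset, or an IterableDataset when streaming."""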
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) SCREAMING_SNAKE_CASE__ = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) SCREAMING_SNAKE_CASE__ = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) SCREAMING_SNAKE_CASE__ = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) SCREAMING_SNAKE_CASE__ = field( default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) SCREAMING_SNAKE_CASE__ = field(default=2e-4 , metadata={'''help''': '''Learning rate fo training.'''} ) SCREAMING_SNAKE_CASE__ = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} ) SCREAMING_SNAKE_CASE__ = field( default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) SCREAMING_SNAKE_CASE__ = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) SCREAMING_SNAKE_CASE__ = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} ) SCREAMING_SNAKE_CASE__ = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) SCREAMING_SNAKE_CASE__ = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} ) SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''Training seed.'''} ) SCREAMING_SNAKE_CASE__ = field( default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) SCREAMING_SNAKE_CASE__ = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) SCREAMING_SNAKE_CASE__ = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'''} ) SCREAMING_SNAKE_CASE__ = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) SCREAMING_SNAKE_CASE__ = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) SCREAMING_SNAKE_CASE__ = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) SCREAMING_SNAKE_CASE__ = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) SCREAMING_SNAKE_CASE__ = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) SCREAMING_SNAKE_CASE__ = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) SCREAMING_SNAKE_CASE__ = field( default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) SCREAMING_SNAKE_CASE__ = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) SCREAMING_SNAKE_CASE__ = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) SCREAMING_SNAKE_CASE__ = field( default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) SCREAMING_SNAKE_CASE__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) SCREAMING_SNAKE_CASE__ = field( default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) SCREAMING_SNAKE_CASE__ = field( default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) SCREAMING_SNAKE_CASE__ = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) SCREAMING_SNAKE_CASE__ = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) SCREAMING_SNAKE_CASE__ = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) SCREAMING_SNAKE_CASE__ = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) SCREAMING_SNAKE_CASE__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) SCREAMING_SNAKE_CASE__ = field(default=20_0000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) SCREAMING_SNAKE_CASE__ = field( default=3_2768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} ) SCREAMING_SNAKE_CASE__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to 
model.'''} ) SCREAMING_SNAKE_CASE__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Push the saved model to the hub.'''} )
323
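# A minimal sketch of how argument dataclasses like the ones above are consumed:
# transformers.HfArgumentParser turns each field into a CLI flag, using the
# metadata "help" strings for --help output. The field names below are
# illustrative stand-ins, since the original attribute names are not shown.
from dataclasses import dataclass, field
from transformers import HfArgumentParser


@dataclass
class EvalArguments:
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    temperature: float = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    seed: int = field(default=1, metadata={"help": "Random seed used for evaluation."})


parser = HfArgumentParser(EvalArguments)
(args,) = parser.parse_args_into_dataclasses(args=["--temperature", "0.8"])
print(args.temperature)  # 0.8, overriding the dataclass default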
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
323
1
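# A hedged usage sketch of the processor exercised by the tests above; the
# checkpoint and voice preset mirror the test fixtures and require Hub access
# to download the tokenizer and speaker embeddings.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(list(inputs.keys()))  # tokenized text plus the "history_prompt" voice embeddings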
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : int = global_rng SCREAMING_SNAKE_CASE : List[str] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=7 , lowerCamelCase_ : str=4_00 , lowerCamelCase_ : Optional[int]=20_00 , lowerCamelCase_ : Union[str, Any]=24 , lowerCamelCase_ : str=24 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[int]=1_60_00 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[str]=True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Tuple = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : Optional[int] = feature_size SCREAMING_SNAKE_CASE : int = num_mel_bins SCREAMING_SNAKE_CASE : List[Any] = padding_value SCREAMING_SNAKE_CASE : str = sampling_rate SCREAMING_SNAKE_CASE : int = return_attention_mask SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Tuple=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : List[Any] ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Tuple = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SpeechaTextFeatureExtractor if is_speech_available() else None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = SpeechaTextFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1e-3 ) ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : str = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test feature size SCREAMING_SNAKE_CASE : List[str] = feature_extractor(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCamelCase_ , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : int = feature_extractor(lowerCamelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" ).input_features SCREAMING_SNAKE_CASE : Any = feature_extractor(lowerCamelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : str = ["""longest""", """max_length""", """do_not_pad"""] SCREAMING_SNAKE_CASE : Any = [None, 16, None] for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = feature_extractor( lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.input_features SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.attention_mask SCREAMING_SNAKE_CASE : List[str] = [np.sum(lowerCamelCase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : Tuple = ["""longest""", """max_length""", """do_not_pad"""] SCREAMING_SNAKE_CASE : Tuple = [None, 16, None] for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = feature_extractor( lowerCamelCase_ , max_length=lowerCamelCase_ , 
padding=lowerCamelCase_ , return_tensors="""np""" , return_attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.input_features SCREAMING_SNAKE_CASE : int = inputs.attention_mask SCREAMING_SNAKE_CASE : Union[str, Any] = [np.sum(lowerCamelCase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = feature_extractor( lowerCamelCase_ , padding="""max_length""" , max_length=4 , truncation=lowerCamelCase_ , return_tensors="""np""" , return_attention_mask=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = inputs.input_features SCREAMING_SNAKE_CASE : int = inputs.attention_mask SCREAMING_SNAKE_CASE : Tuple = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : Any = feature_extractor( lowerCamelCase_ , padding="""longest""" , max_length=4 , truncation=lowerCamelCase_ , return_tensors="""np""" , return_attention_mask=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = inputs.input_features SCREAMING_SNAKE_CASE : Any = inputs.attention_mask SCREAMING_SNAKE_CASE : Optional[Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) SCREAMING_SNAKE_CASE : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : Tuple = feature_extractor( lowerCamelCase_ , padding="""longest""" , max_length=16 , truncation=lowerCamelCase_ , return_tensors="""np""" , return_attention_mask=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = inputs.input_features SCREAMING_SNAKE_CASE : List[Any] = inputs.attention_mask SCREAMING_SNAKE_CASE : int = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' import torch SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() 
) SCREAMING_SNAKE_CASE : Optional[Any] = np.random.rand(1_00 , 32 ).astype(np.floataa ) SCREAMING_SNAKE_CASE : Tuple = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE : Tuple = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) SCREAMING_SNAKE_CASE : List[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' from datasets import load_dataset SCREAMING_SNAKE_CASE : int = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([ -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241, -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128, -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625, ] ) # fmt: on SCREAMING_SNAKE_CASE : List[str] = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE : List[str] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 5_84, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )
323
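# A small sketch of what the tests above verify: the extractor turns a raw
# float waveform into log-mel filter-bank features of shape
# (batch, frames, feature_size), normalized per utterance. Older transformers
# versions may additionally require torchaudio for the fbank computation.
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor(feature_size=24, num_mel_bins=24, sampling_rate=16000)
waveform = np.random.randn(16000).astype(np.float32)  # one second of synthetic audio
features = extractor(waveform, sampling_rate=16000, return_tensors="np").input_features
print(features.shape)  # (1, num_frames, 24)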
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
323
1
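# A tiny worked example of the accuracy metric the Trainer above plugs in:
# argmax over per-choice logits, then mean agreement with the gold labels.
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
preds = np.argmax(logits, axis=1)  # -> [1, 0, 1]
labels = np.array([1, 0, 0])
print((preds == labels).mean())  # 0.666...: two of three examples correct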
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
323
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
323
1
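# A numeric sketch of the straight-through vector-quantization step implemented
# above: nearest-codebook lookup, commitment loss, and gradient pass-through.
# The sizes are arbitrary toy values, not the model's real dimensions.
import torch

codebook = torch.nn.Embedding(8, 4)             # n_e=8 codes of vq_embed_dim=4
z = torch.randn(2, 4, requires_grad=True)       # two flattened latent vectors
indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)
loss = torch.mean((z_q.detach() - z) ** 2) + 0.25 * torch.mean((z_q - z.detach()) ** 2)
z_q = z + (z_q - z).detach()                    # straight-through estimator
print(indices.tolist(), round(loss.item(), 4))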
'''simple docstring''' import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [ """decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = emb.weight.shape SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = emb.weight.data return lin_layer def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = torch.load(lowerCamelCase_ , map_location="""cpu""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = Namespace(**checkpoint["""cfg"""]["""model"""] ) SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint["""model"""] remove_ignore_keys_(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = state_dict["""decoder.embed_tokens.weight"""].shape[0] SCREAMING_SNAKE_CASE : Optional[Any] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()} SCREAMING_SNAKE_CASE : List[str] = XGLMConfig( vocab_size=lowerCamelCase_ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) SCREAMING_SNAKE_CASE : Tuple = XGLMForCausalLM(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) print(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
323
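# A minimal sketch of the weight tying performed by the make_linear_from_emb
# helper in the script above: the output projection reuses the input embedding
# matrix, so logits are dot products of hidden states against every token
# embedding. Toy sizes only.
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin = nn.Linear(4, 10, bias=False)
lin.weight.data = emb.weight.data
logits = lin(emb(torch.tensor([3])))
print(logits.shape)  # torch.Size([1, 10]): one score per vocabulary entry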
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : str = 3 SCREAMING_SNAKE_CASE : List[Any] = (32, 32) SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input return init_dict, inputs_dict
323
1
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Any = pad_token_id SCREAMING_SNAKE_CASE : List[Any] = max_length SCREAMING_SNAKE_CASE : Optional[int] = vocab SCREAMING_SNAKE_CASE : List[Any] = merges SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab() return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ): '''simple docstring''' return cls(**lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ ) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs( lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
323
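# A hedged sketch of using the in-graph tokenizer layer defined above; it
# requires tensorflow, tensorflow-text and keras-nlp, plus Hub access for the
# GPT-2 vocab and merges.
import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["hello world"]))
print(outputs["input_ids"])  # token ids; padded to max_length only if pad_token_id is set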
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ): '''simple docstring''' pass def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ ) import datasets SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) SCREAMING_SNAKE_CASE : Any = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , lowerCamelCase_ , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large""" SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] ) # This seems flaky. 
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
323
1
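# A minimal sketch of the slow integration path tested above; it downloads the
# Intel/dpt-large checkpoint and an example image from the COCO server.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")        # PIL image of the predicted depth map
print(result["predicted_depth"].shape)   # raw torch.Tensor of per-pixel depths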
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( lowercase_ , lowercase_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , lowerCamelCase_ : int = 16 , lowerCamelCase_ : int = 88 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : int = 32 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "geglu" , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : Union[str, Any] = attention_head_dim SCREAMING_SNAKE_CASE : Any = num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE : str = in_channels SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.GroupNorm(num_groups=lowerCamelCase_ , num_channels=lowerCamelCase_ , eps=1e-6 , affine=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) # 3. Define transformers blocks SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dropout=lowerCamelCase_ , cross_attention_dim=lowerCamelCase_ , activation_fn=lowerCamelCase_ , attention_bias=lowerCamelCase_ , double_self_attention=lowerCamelCase_ , norm_elementwise_affine=lowerCamelCase_ , ) for d in range(lowerCamelCase_ ) ] ) SCREAMING_SNAKE_CASE : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=1 , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = hidden_states.shape SCREAMING_SNAKE_CASE : Union[str, Any] = batch_frames // num_frames SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states SCREAMING_SNAKE_CASE : Optional[int] = hidden_states[None, :].reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.proj_in(lowerCamelCase_ ) # 2. Blocks for block in self.transformer_blocks: SCREAMING_SNAKE_CASE : Optional[Any] = block( lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , timestep=lowerCamelCase_ , cross_attention_kwargs=lowerCamelCase_ , class_labels=lowerCamelCase_ , ) # 3. 
Output SCREAMING_SNAKE_CASE : Union[str, Any] = self.proj_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = ( hidden_states[None, None, :] .reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) SCREAMING_SNAKE_CASE : List[Any] = hidden_states.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=lowerCamelCase_ )
323
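# A pure-tensor sketch of the reshuffling the temporal transformer above
# performs: (batch*frames, C, H, W) activations are regrouped so that
# attention runs across the frame axis for every spatial location. Toy sizes.
import torch

batch, frames, channels, height, width = 2, 4, 8, 16, 16
hidden = torch.randn(batch * frames, channels, height, width)
x = hidden[None, :].reshape(batch, frames, channels, height, width).permute(0, 2, 1, 3, 4)
x = x.permute(0, 3, 4, 2, 1).reshape(batch * height * width, frames, channels)
print(x.shape)  # torch.Size([512, 4, 8]): 512 spatial tokens, 4 frames each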
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : str = image_std def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
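# Minimal usage sketch of what the tests above exercise. It relies only on the
# public ViTImageProcessor API; the 18x18 target size mirrors the tester
# defaults and is illustrative, not a recommended setting.
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(
    size={"height": 18, "width": 18},
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)
image = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, height, width)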
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
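# Quick sanity check for the config above (a sketch using the released
# transformers DinatConfig): hidden_size is derived as
# embed_dim * 2 ** (len(depths) - 1), the channel dim after the last stage.
from transformers import DinatConfig

config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
assert config.hidden_size == 64 * 2 ** 3  # 512
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]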
'''simple docstring'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
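# Sketch of the lazy-loading behavior configured above: importing the package
# is cheap, and each submodule is materialized on first attribute access.
from transformers import onnx

config_cls = onnx.OnnxConfig  # the `.config` submodule is imported only here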
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def __A ( lowerCamelCase_ ): """simple docstring""" if "cls_token" in name: SCREAMING_SNAKE_CASE : Dict = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: SCREAMING_SNAKE_CASE : Tuple = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: SCREAMING_SNAKE_CASE : Tuple = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE : str = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE : List[str] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: SCREAMING_SNAKE_CASE : List[str] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: SCREAMING_SNAKE_CASE : Dict = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE : Any = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE : List[str] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE : Any = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: SCREAMING_SNAKE_CASE : Dict = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: SCREAMING_SNAKE_CASE : int = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: SCREAMING_SNAKE_CASE : Tuple = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(lowerCamelCase_ ) if "qkv" in key: SCREAMING_SNAKE_CASE : Union[str, Any] = key.split(""".""" ) SCREAMING_SNAKE_CASE : str = int(key_split[1] ) if "decoder_blocks" in key: SCREAMING_SNAKE_CASE : int = config.decoder_hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = """decoder.decoder_layers.""" if "weight" in key: SCREAMING_SNAKE_CASE : Union[str, Any] = val[:dim, :] SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE : int = val[-dim:, :] elif "bias" in key: SCREAMING_SNAKE_CASE : Tuple = val[:dim] SCREAMING_SNAKE_CASE : List[Any] = val[dim : dim * 2] SCREAMING_SNAKE_CASE : Dict = val[-dim:] else: SCREAMING_SNAKE_CASE : Tuple = config.hidden_size SCREAMING_SNAKE_CASE : List[Any] = """vit.encoder.layer.""" if "weight" in key: 
SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :] SCREAMING_SNAKE_CASE : Dict = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE : str = val[-dim:, :] elif "bias" in key: SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim] SCREAMING_SNAKE_CASE : List[Any] = val[dim : dim * 2] SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:] else: SCREAMING_SNAKE_CASE : List[Any] = val return orig_state_dict def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig() if "large" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = 10_24 SCREAMING_SNAKE_CASE : Optional[Any] = 40_96 SCREAMING_SNAKE_CASE : Any = 24 SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "huge" in checkpoint_url: SCREAMING_SNAKE_CASE : Dict = 14 SCREAMING_SNAKE_CASE : Union[str, Any] = 12_80 SCREAMING_SNAKE_CASE : Any = 51_20 SCREAMING_SNAKE_CASE : Optional[Any] = 32 SCREAMING_SNAKE_CASE : Union[str, Any] = 16 SCREAMING_SNAKE_CASE : List[str] = ViTMAEForPreTraining(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" )["""model"""] SCREAMING_SNAKE_CASE : Tuple = ViTMAEImageProcessor(size=config.image_size ) SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) SCREAMING_SNAKE_CASE : Dict = ViTMAEImageProcessor(size=config.image_size ) SCREAMING_SNAKE_CASE : Any = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits if "large" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] ) elif "huge" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __UpperCAmelCase = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
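# Example invocation of the conversion script above (the script filename and
# output path are illustrative):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base
#
# The script downloads the original MAE weights, remaps parameter names
# (splitting fused qkv matrices into separate query/key/value projections),
# checks a few logits against reference values, and saves the model and image
# processor to the output directory.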
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
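# The shim above keeps old import paths working while emitting a FutureWarning:
#
#   from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import (
#       StableDiffusionControlNetPipeline,  # deprecated location
#   )
#
# New code should use `from diffusers import StableDiffusionControlNetPipeline`.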
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
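# Minimal usage sketch of the processor under test (downloads the checkpoint,
# so network access is assumed; the checkpoint and voice preset names follow
# the test setup above).
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(inputs["input_ids"].shape, sorted(inputs["history_prompt"].keys()))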
'''simple docstring'''


def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1.

    >>> set_bit(0b1101, 1)  # 13 -> 15
    15
    """
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0.

    >>> clear_bit(0b1111, 1)  # 15 -> 13
    13
    """
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`.

    >>> flip_bit(0b1101, 1)  # 13 -> 15
    15
    """
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set.

    >>> is_bit_set(0b1010, 1)
    True
    """
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return 1 if the bit at `position` of `number` is set, else 0.

    >>> get_bit(0b1010, 0)
    0
    """
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
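# Composing the helpers above: move the set bit of 0b0100 down one position.
value = 0b0100
value = clear_bit(value, 2)  # 0b0000
value = set_bit(value, 1)    # 0b0010
assert value == 0b0010 and is_bit_set(value, 1) and get_bit(value, 2) == 0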
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class UpperCamelCase__ : """simple docstring""" def __init__( self : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any]=13 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : int=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Union[str, Any]=99 , lowerCamelCase_ : Any=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : Dict="gelu" , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : int=2 , lowerCamelCase_ : Optional[Any]=0.02 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : Dict=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : Any = 13 SCREAMING_SNAKE_CASE : Tuple = 7 SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : List[Any] = 99 SCREAMING_SNAKE_CASE : Tuple = 32 SCREAMING_SNAKE_CASE : Tuple = 2 SCREAMING_SNAKE_CASE : str = 4 SCREAMING_SNAKE_CASE : List[str] = 37 SCREAMING_SNAKE_CASE : List[Any] = """gelu""" SCREAMING_SNAKE_CASE : int = 0.1 SCREAMING_SNAKE_CASE : Dict = 0.1 SCREAMING_SNAKE_CASE : str = 5_12 SCREAMING_SNAKE_CASE : List[str] = 16 SCREAMING_SNAKE_CASE : Optional[int] = 2 SCREAMING_SNAKE_CASE : str = 0.02 SCREAMING_SNAKE_CASE : int = 3 SCREAMING_SNAKE_CASE : Optional[Any] = 4 SCREAMING_SNAKE_CASE : Dict = None def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : List[str] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE : Tuple = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = TFRoFormerModel(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask] SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : str = TFRoFormerForCausalLM(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = TFRoFormerForMaskedLM(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.num_labels SCREAMING_SNAKE_CASE : Union[str, Any] = TFRoFormerForSequenceClassification(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.num_choices 
SCREAMING_SNAKE_CASE : List[Any] = TFRoFormerForMultipleChoice(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE : List[str] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : int = TFRoFormerForTokenClassification(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TFRoFormerForQuestionAnswering(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ) : int = config_and_inputs SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE__ = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False 
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = TFRoFormerModelTester(self ) SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(lowerCamelCase_ ) @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) SCREAMING_SNAKE_CASE : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )[0] # TODO Replace vocab size SCREAMING_SNAKE_CASE : Dict = 5_00_00 SCREAMING_SNAKE_CASE : List[Any] = [1, 6, vocab_size] self.assertEqual(output.shape , lowerCamelCase_ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
SCREAMING_SNAKE_CASE : int = tf.constant( [ [ [-0.12_053_341, -1.0_264_901, 0.29_221_946], [-1.5_133_783, 0.197_433, 0.15_190_607], [-5.0_135_403, -3.900_256, -0.84_038_764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 ) @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 1e-4 def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = tf.constant([[4, 10]] ) SCREAMING_SNAKE_CASE : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) SCREAMING_SNAKE_CASE : Optional[int] = emba(input_ids.shape ) SCREAMING_SNAKE_CASE : Dict = tf.constant( [[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] ) tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , atol=self.tolerance ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = tf.constant( [ [0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000], [0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617], [0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870], ] ) SCREAMING_SNAKE_CASE : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) SCREAMING_SNAKE_CASE : List[str] = emba.weight[:3, :5] tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , atol=self.tolerance ) @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 1e-4 def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 SCREAMING_SNAKE_CASE : List[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 SCREAMING_SNAKE_CASE : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) SCREAMING_SNAKE_CASE : Optional[int] = embed_positions([2, 16, 7_68] )[None, None, :, :] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = TFRoFormerSelfAttention.apply_rotary_position_embeddings( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = tf.constant( [ [0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700], [-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343], [-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985], [-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871], [0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980], [3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253], ] ) SCREAMING_SNAKE_CASE : int = tf.constant( [ [0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700], [0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343], [1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985], [2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871], [-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980], [-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowerCamelCase_ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowerCamelCase_ , atol=self.tolerance )
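# A hedged NumPy sketch of the rotary trick the last test verifies: feature
# pairs of the query/key are rotated by position-dependent sinusoids, so dot
# products depend on relative offsets. This mirrors the idea, not the exact
# TFRoFormerSelfAttention code.
import numpy as np


def apply_rotary(x: np.ndarray, sinusoidal: np.ndarray) -> np.ndarray:
    # x: (..., seq_len, dim); sinusoidal: (seq_len, dim) holding [sin | cos] halves
    sin_half, cos_half = np.split(sinusoidal, 2, axis=-1)
    sin = np.repeat(sin_half, 2, axis=-1)  # repeat_interleave to cover feature pairs
    cos = np.repeat(cos_half, 2, axis=-1)
    # rotate_half: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    x_rot = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos + x_rot * sin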
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
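# The cache tests above follow the standard Flax incremental-decoding recipe
# (an outline; names match the tester, values are illustrative):
#
#   past = model.init_cache(batch_size, max_decoder_length)   # allocate KV buffers
#   out = model(input_ids[:, :-1], attention_mask=mask,
#               past_key_values=past, position_ids=positions)  # prime the cache
#   out = model(input_ids[:, -1:], attention_mask=mask,
#               past_key_values=out.past_key_values,
#               position_ids=last_position)                    # one token at a time
#
# The assertion is that the cached path reproduces the logits of a single full
# forward pass to within 1e-3.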
'''simple docstring'''

from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so every node holds exactly one coin.

    >>> distribute_coins(None)
    0
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
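# Worked example: three nodes holding three coins in total; the root passes
# one coin to each empty child, so two moves are required.
#
#       3              1
#      / \     ->     / \
#     0   0          1   1
example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_tree) == 2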
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim SCREAMING_SNAKE_CASE : Tuple = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : str = ( nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : Any = GPTaConfig( vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ): '''simple 
docstring'''
        # tail of get_dummy_token(self, batch_size: int, device: torch.device)
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        # Generates tokens until eos_token_id is reached, using beam search.
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # mask out finished beams before accumulating scores
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
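The decoder ranks candidate beams by length-normalized log-probability (`scores_sum / seq_lengths`), so shorter captions are not unfairly favored. A minimal usage sketch, assuming an already-constructed decoder instance and a matching GPT-2-style tokenizer; `decoder` and `tokenizer` are hypothetical placeholders, not defined in the file above:

```python
# Hypothetical sketch: `decoder` is an instance of the text decoder above,
# `tokenizer` a compatible tokenizer; neither is created in this module.
import torch

clip_features = torch.randn(2, 768)  # assumed CLIP text-embedding width
tokens, lengths = decoder.generate_captions(clip_features, tokenizer.eos_token_id, "cpu")
captions = [
    tokenizer.decode(t[: int(n)], skip_special_tokens=True)
    for t, n in zip(tokens, lengths)
]
```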
# CLIP-based image encoder for the Paint-by-Example pipeline (diffusers).
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
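A quick sketch of exercising the encoder with a randomly initialized CLIP vision config. The config is illustrative (transformers' defaults), not the checkpoint Paint-by-Example ships with, and the relative imports mean this only runs inside the diffusers source tree:

```python
import torch
from transformers import CLIPVisionConfig

config = CLIPVisionConfig()  # default ViT-style config, for illustration only
encoder = PaintByExampleImageEncoder(config)
pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
cond, uncond = encoder(pixel_values, return_uncond_vector=True)
print(cond.shape, uncond.shape)  # both (1, 1, 768) with these defaults
```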
# GIT model configuration (transformers/models/git/configuration_git.py).
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
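A short sketch of the config round-trip the class supports; the values checked are simply the signature defaults above, and the snippet assumes it runs inside the transformers source tree (relative imports):

```python
config = GitConfig()  # nested GitVisionConfig is created with defaults
assert config.vision_config.patch_size == 16

d = config.to_dict()  # the vision sub-config is serialized inline
restored = GitConfig(vision_config=d["vision_config"])
assert restored.vision_config.image_size == 224
```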
from ..utils import DummyObject, requires_backends


# Import-time placeholders (diffusers-style utils/dummy_pt_objects.py). The source
# module repeats one placeholder class per torch-backed public object, but the
# obfuscation gave every class the identical name below, so the dozens of
# definitions that followed are verbatim redefinitions of this single pattern.
class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# Module-level placeholders follow the same shape; the original defines seven of
# them, all reduced by the obfuscation to the same stub.
def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])
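The placeholders fail only at use time, which is what lets the library's top-level import succeed when torch is absent. A hypothetical illustration of that behavior:

```python
# With the torch backend unavailable, touching any placeholder raises ImportError
# via requires_backends; with torch installed, the real classes are imported
# instead of these dummies, so this branch is never reached.
try:
    UpperCamelCase__()
except ImportError as err:
    print(err)  # message explains that the object requires the "torch" backend
```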
# Manim scene animating accelerate's big-model inference (CPU <-> GPU weight
# offloading). NOTE: the scene name and the direction/color constants (UP, DOWN,
# LEFT, RIGHT, BLUE, GREEN, ORANGE) were destroyed by the obfuscation; the ones
# below are plausible reconstructions, consistent with the scene's key.
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(GREEN, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=GREEN, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
# Deprecated TensorFlow MNIST loader (contrib-era mnist.py).
import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = "Validation size should be between 0 and " f"{len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
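A minimal sketch of consuming the loader; the download directory is a hypothetical path, and the call fetches from the CVDF mirror unless the gzip files are already cached there:

```python
data = read_data_sets("/tmp/mnist", one_hot=True, validation_size=5000)
print(data.train.num_examples, data.validation.num_examples, data.test.num_examples)

images, labels = data.train.next_batch(32)  # shuffled 32-example minibatch
print(images.shape, labels.shape)           # (32, 784) and (32, 10)
```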
323
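The `_DataSet` record above implements epoch-aware mini-batching: when a requested batch crosses an epoch boundary it takes the leftover examples, reshuffles the data, and tops the batch up from the new epoch. A minimal, runnable sketch of that wraparound logic (illustrative names, not the class from the record):

import numpy as np

class MiniDataset:
    """Sketch of the epoch-aware batching above (illustrative, simplified)."""
    def __init__(self, images, labels):
        self.images, self.labels = images, labels
        self.index, self.epochs = 0, 0

    def next_batch(self, batch_size, shuffle=True):
        n, start = len(self.images), self.index
        if start + batch_size > n:          # crossed an epoch boundary
            self.epochs += 1
            rest_x, rest_y = self.images[start:], self.labels[start:]
            if shuffle:                     # reshuffle for the new epoch
                perm = np.random.permutation(n)
                self.images, self.labels = self.images[perm], self.labels[perm]
            self.index = batch_size - (n - start)   # how much of the new epoch we need
            return (np.concatenate([rest_x, self.images[:self.index]]),
                    np.concatenate([rest_y, self.labels[:self.index]]))
        self.index += batch_size
        return self.images[start:self.index], self.labels[start:self.index]

ds = MiniDataset(np.arange(10).reshape(10, 1), np.arange(10))
print(ds.next_batch(4)[1], ds.next_batch(4)[1], ds.next_batch(4)[1])  # third batch wraps the epoch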
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = graph # mapping node to its parent in resulting breadth first tree SCREAMING_SNAKE_CASE : dict[str, str | None] = {} SCREAMING_SNAKE_CASE : List[str] = source_vertex def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex} SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue while queue: SCREAMING_SNAKE_CASE : str = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = vertex queue.append(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ ) if target_vertex_parent is None: SCREAMING_SNAKE_CASE : Tuple = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(lowerCamelCase_ ) return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}''' if __name__ == "__main__": __UpperCAmelCase = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
323
1
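The `Graph` class in the record above builds a parent map with breadth-first search and then reconstructs paths recursively. The same idea as a compact iterative function (a sketch, not the class itself), using a deque so dequeuing is O(1):

from collections import deque

def bfs_shortest_path(graph, source, target):
    """Parent-map BFS; returns the vertex list from source to target."""
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()            # FIFO order => breadth-first
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    if target not in parent:
        raise ValueError(f"No path from vertex: {source} to vertex: {target}")
    path = []
    while target is not None:               # walk parent pointers back to the source
        path.append(target)
        target = parent[target]
    return path[::-1]

demo = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print(bfs_shortest_path(demo, "A", "D"))    # ['A', 'B', 'D']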
'''simple docstring''' import baseaa def __A ( lowerCamelCase_ ): """simple docstring""" return baseaa.baaencode(string.encode("""utf-8""" ) ) def __A ( lowerCamelCase_ ): """simple docstring""" return baseaa.baadecode(lowerCamelCase_ ).decode("""utf-8""" ) if __name__ == "__main__": __UpperCAmelCase = """Hello World!""" __UpperCAmelCase = baseaa_encode(test) print(encoded) __UpperCAmelCase = baseaa_decode(encoded) print(decoded)
323
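Under this dump's renaming scheme, the `baseaa` module and its `baaencode`/`baadecode` calls in the record above appear to correspond to Python's standard-library Base85 helpers. A runnable round-trip sketch with the real `base64` API:

import base64

def b85_encode(string: str) -> bytes:
    # base64.b85encode is the stdlib call the renamed code above appears to wrap
    return base64.b85encode(string.encode("utf-8"))

def b85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")

encoded = b85_encode("Hello World!")
print(encoded)
assert b85_decode(encoded) == "Hello World!"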
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCAmelCase = 0 __UpperCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCAmelCase = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = pos_x SCREAMING_SNAKE_CASE : Any = pos_y SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x) SCREAMING_SNAKE_CASE : Tuple = goal_x SCREAMING_SNAKE_CASE : List[str] = goal_y SCREAMING_SNAKE_CASE : Optional[Any] = g_cost SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = self.calculate_heuristic() SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ): '''simple docstring''' return self.f_cost < other.f_cost class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = [self.start] SCREAMING_SNAKE_CASE : list[Node] = [] SCREAMING_SNAKE_CASE : str = False def lowerCamelCase_ ( self : Any ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowerCamelCase_ ) self.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCamelCase_ ) else: self.open_nodes.append(lowerCamelCase_ ) return [self.start.pos] def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [] for action in delta: SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1] SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) ) return successors def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = node SCREAMING_SNAKE_CASE : List[str] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent path.reverse() return path class UpperCamelCase__ : """simple docstring""" def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = False def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCamelCase_ , lowerCamelCase_ ) self.fwd_astar.closed_nodes.append(lowerCamelCase_ ) self.bwd_astar.closed_nodes.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node SCREAMING_SNAKE_CASE : Any = current_fwd_node SCREAMING_SNAKE_CASE : Dict = { self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ), self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCamelCase_ ) else: # retrieve the best current path SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop( astar.open_nodes.index(lowerCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCamelCase_ ) else: astar.open_nodes.append(lowerCamelCase_ ) return [self.fwd_astar.start.pos] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCAmelCase = (0, 0) __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCAmelCase = time.time() __UpperCAmelCase = AStar(init, goal) __UpperCAmelCase = a_star.search() __UpperCAmelCase = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') __UpperCAmelCase = time.time() __UpperCAmelCase = BidirectionalAStar(init, goal) __UpperCAmelCase = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
323
1
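Both searches in the record above keep the open list sorted by f = g + h and retrace parent pointers once the goal is popped. A compact heap-based sketch of the same idea, keeping the file's (y, x) position convention with 1-cells as obstacles (a sketch, not the classes above):

import heapq
from itertools import count

def astar(grid, start, goal):
    def h(pos):  # Manhattan heuristic, i.e. the HEURISTIC == 1 branch above
        return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])
    tie = count()        # tiebreaker so the heap never has to compare parents
    open_heap = [(h(start), 0, next(tie), start, None)]
    parents = {}
    while open_heap:
        _, g, _, pos, parent = heapq.heappop(open_heap)
        if pos in parents:
            continue                      # already expanded via an equal-or-cheaper path
        parents[pos] = parent
        if pos == goal:
            path = []
            while pos is not None:        # retrace parent pointers, as retrace_path does
                path.append(pos)
                pos = parents[pos]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = pos[0] + dy, pos[1] + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0:
                if (ny, nx) not in parents:
                    heapq.heappush(open_heap, (g + 1 + h((ny, nx)), g + 1, next(tie), (ny, nx), pos))
    return [start]                        # mirrors the fallback return in the class above

print(astar([[0, 0, 0], [0, 1, 0], [0, 0, 0]], (0, 0), (2, 2)))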
'''simple docstring''' def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if len(lowerCamelCase_ ) != len(lowerCamelCase_ ): raise ValueError("""String lengths must match!""" ) SCREAMING_SNAKE_CASE : List[str] = 0 for chara, charb in zip(lowerCamelCase_ , lowerCamelCase_ ): if chara != charb: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
323
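The same length-check-plus-count can be written as a single generator expression:

def hamming_distance(string_a: str, string_b: str) -> int:
    """One-expression equivalent of the loop above (illustrative name)."""
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    return sum(ca != cb for ca, cb in zip(string_a, string_b))

print(hamming_distance("karolin", "kathrin"))  # 3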
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''efficientnet''' def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : int = image_size SCREAMING_SNAKE_CASE : int = width_coefficient SCREAMING_SNAKE_CASE : List[str] = depth_coefficient SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor SCREAMING_SNAKE_CASE : List[str] = kernel_sizes SCREAMING_SNAKE_CASE : Dict = in_channels SCREAMING_SNAKE_CASE : List[str] = out_channels SCREAMING_SNAKE_CASE : Any = depthwise_padding SCREAMING_SNAKE_CASE : Dict = strides SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats SCREAMING_SNAKE_CASE : Any = expand_ratios SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dim SCREAMING_SNAKE_CASE : List[str] = pooling_type SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : Any = batch_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum SCREAMING_SNAKE_CASE : Dict = dropout_rate SCREAMING_SNAKE_CASE : int = drop_connect_rate SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4 class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return 1e-5
323
1
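The `width_coefficient`, `depth_coefficient`, and `depth_divisor` fields above drive EfficientNet's compound scaling. A sketch of the standard rounding rules those fields usually feed (the common EfficientNet recipe, not code lifted from this config class):

import math

def round_filters(filters: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    """Scale channels by the width coefficient, round to the nearest
    multiple of depth_divisor, never dropping more than 10%."""
    filters *= width_coefficient
    new_filters = max(depth_divisor, int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_filters < 0.9 * filters:      # guard against rounding down too far
        new_filters += depth_divisor
    return int(new_filters)

def round_repeats(repeats: int, depth_coefficient: float) -> int:
    """Scale block repeats by the depth coefficient, rounding up."""
    return int(math.ceil(depth_coefficient * repeats))

# e.g. with the b7-style coefficients from the defaults above
print([round_filters(c, 2.0) for c in [32, 16, 24]])   # [64, 32, 48]
print(round_repeats(3, 3.1))                            # 10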
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin __UpperCAmelCase = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class UpperCamelCase__ ( unittest.TestCase , lowercase_ ): """simple docstring""" def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = load_tool("""text-question-answering""" ) self.tool.setup() SCREAMING_SNAKE_CASE : str = load_tool("""text-question-answering""" , remote=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.tool(lowerCamelCase_ , """What did Hugging Face do in April 2021?""" ) self.assertEqual(lowerCamelCase_ , """launched the BigScience Research Workshop""" ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.remote_tool(lowerCamelCase_ , """What did Hugging Face do in April 2021?""" ) self.assertEqual(lowerCamelCase_ , """launched the BigScience Research Workshop""" ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.tool(text=lowerCamelCase_ , question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(lowerCamelCase_ , """launched the BigScience Research Workshop""" ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.remote_tool(text=lowerCamelCase_ , question="""What did Hugging Face do in April 2021?""" ) self.assertEqual(lowerCamelCase_ , """launched the BigScience Research Workshop""" )
323
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): '''simple docstring''' super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {} SCREAMING_SNAKE_CASE : List[Any] = {} if prompt is not None: SCREAMING_SNAKE_CASE : List[Any] = prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ): '''simple docstring''' return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE : Optional[Any] = None return model_inputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): SCREAMING_SNAKE_CASE : List[str] = None if generate_kwargs is None: SCREAMING_SNAKE_CASE : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [] for output_ids in model_outputs: SCREAMING_SNAKE_CASE : List[Any] = { """generated_text""": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
323
1
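A minimal usage sketch for the pipeline defined above; the checkpoint name and image path are illustrative, and any captioning model registered for the `image-to-text` task would work the same way:

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
# Accepts a local path, URL, or PIL.Image; a prompt kwarg enables conditional generation
print(captioner("cats.png"))  # => [{'generated_text': '...'}]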
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass __UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1) __UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ : """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : Iterable[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Node | None = None for i in sorted(lowerCamelCase_ , reverse=True ): SCREAMING_SNAKE_CASE : Optional[int] = Node(lowerCamelCase_ , self.head ) def __iter__( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.head while node: yield node.data SCREAMING_SNAKE_CASE : Optional[int] = node.next_node def __len__( self : Dict ): '''simple docstring''' return sum(1 for _ in self ) def __str__( self : int ): '''simple docstring''' return " -> ".join([str(lowerCamelCase_ ) for node in self] ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return SortedLinkedList(list(lowerCamelCase_ ) + list(lowerCamelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
323
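The merge in the record above concatenates both inputs and re-sorts on construction; when the two inputs are already sorted, the standard library can merge them lazily instead:

import heapq

odd = sorted((3, 9, -11, 0, 7, 5, 1, -1))
even = sorted((4, 6, 2, 0, 8, 10, 3, -2))
print(list(heapq.merge(odd, even)))  # one sorted stream over both inputs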
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ = 10 def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**lowerCamelCase_ ) return config def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0] SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1] SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 1 scheduler.set_timesteps(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = scheduler.timesteps SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCamelCase_ ): # 1. scale model input SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. 
predict previous sample x_t-1 SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0] scheduler.set_timesteps(timesteps=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ ) # 2. predict noise residual SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ) # 3. predict previous sample x_t-1 SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample SCREAMING_SNAKE_CASE : Dict = pred_prev_sample SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0] with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config() SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=lowerCamelCase_ )
323
1
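The two timestep tests above exercise the same three-step denoising loop; condensed into a standalone sketch below, where the zero-returning lambda stands in for a real noise-predicting UNet:

import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
model = lambda x, t: torch.zeros_like(x)      # placeholder noise predictor
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)   # 1. scale model input
    residual = model(scaled, t)                        # 2. predict noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 3. step back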
'''simple docstring''' from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CustomTokenizer
323
'''simple docstring''' from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ): '''simple docstring''' super().__init__( lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE : Optional[int] = Text( cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[str] = None self.builder.download_and_prepare( download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE : int = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory ) return dataset
323
1
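The reader in the record above is what backs the packaged `text` builder; the typical end-user entry point looks like this (file path illustrative):

from datasets import load_dataset

# one example per line of the file, exposed under a "text" column
dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(dataset[0])  # {'text': '<first line of my_corpus.txt>'}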
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ibert''' def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[Any]=3_05_22 , lowerCamelCase_ : Tuple=7_68 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : Optional[Any]=12 , lowerCamelCase_ : Optional[Any]=30_72 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[Any]=5_12 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Optional[Any]=0.02 , lowerCamelCase_ : List[Any]=1e-12 , lowerCamelCase_ : Union[str, Any]=1 , lowerCamelCase_ : Tuple=0 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Tuple="absolute" , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Optional[int]="none" , **lowerCamelCase_ : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : Dict = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Any = type_vocab_size SCREAMING_SNAKE_CASE : int = initializer_range SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type SCREAMING_SNAKE_CASE : int = quant_mode SCREAMING_SNAKE_CASE : List[str] = force_dequant class UpperCamelCase__ ( lowercase_ ): """simple docstring""" @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE : Dict = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
323
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 3_84 SCREAMING_SNAKE_CASE : Union[str, Any] = 7 if "tiny" in model_name: SCREAMING_SNAKE_CASE : List[str] = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2) SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24) elif "small" in model_name: SCREAMING_SNAKE_CASE : Any = 96 SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24) elif "base" in model_name: SCREAMING_SNAKE_CASE : int = 1_28 SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32) SCREAMING_SNAKE_CASE : Optional[Any] = 12 SCREAMING_SNAKE_CASE : str = 5_12 elif "large" in model_name: SCREAMING_SNAKE_CASE : Tuple = 1_92 SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2) SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) SCREAMING_SNAKE_CASE : Tuple = 12 SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68 # set label information SCREAMING_SNAKE_CASE : List[str] = 1_50 SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig( embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) SCREAMING_SNAKE_CASE : List[str] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = val def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :] # fmt: on def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape 
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = x.shape[0] SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 ) SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = x.shape[0] SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 ) SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ ) return x def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name] SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[ """state_dict""" ] for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ ) if "bn" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" ) SCREAMING_SNAKE_CASE : Optional[Any] = val # rename keys SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) read_in_q_k_v(lowerCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ ) if "norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # verify on image SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor() SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": SCREAMING_SNAKE_CASE : str = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
323
1
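The `read_in_q_k_v` helper in the record above slices each fused (3*dim, dim) attention projection into separate query/key/value tensors. The same pattern in isolation:

import torch

def split_fused_qkv(in_proj_weight, in_proj_bias, dim):
    # rows [0, dim) are query, [dim, 2*dim) are key, the last dim rows are value
    q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
    q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)

dim = 4
(q_w, q_b), _, _ = split_fused_qkv(torch.randn(3 * dim, dim), torch.randn(3 * dim), dim)
print(q_w.shape, q_b.shape)  # torch.Size([4, 4]) torch.Size([4])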
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) SCREAMING_SNAKE_CASE__ = Features({'''audio''': Audio()} ) SCREAMING_SNAKE_CASE__ = Features({'''labels''': ClassLabel} ) SCREAMING_SNAKE_CASE__ = "audio" SCREAMING_SNAKE_CASE__ = "labels" def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[Any] ): '''simple docstring''' if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowerCamelCase_ ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self ) SCREAMING_SNAKE_CASE : Optional[int] = self.label_schema.copy() SCREAMING_SNAKE_CASE : Optional[Any] = features[self.label_column] SCREAMING_SNAKE_CASE : int = label_schema return task_template @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return { self.audio_column: "audio", self.label_column: "labels", }
323
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Any = pad_token_id SCREAMING_SNAKE_CASE : List[Any] = max_length SCREAMING_SNAKE_CASE : Optional[int] = vocab SCREAMING_SNAKE_CASE : List[Any] = merges SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab() return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ): '''simple docstring''' return cls(**lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ ) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs( lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
323
1
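The padding step that the layer above delegates to `tensorflow_text.pad_model_inputs`, shown in isolation. My understanding of the helper is that it returns the dense ids followed by the matching mask; treat the return order as an assumption:

import tensorflow as tf
from tensorflow_text import pad_model_inputs

ragged_ids = tf.ragged.constant([[1, 2, 3], [4, 5]])
input_ids, attention_mask = pad_model_inputs(ragged_ids, max_seq_length=4, pad_value=0)
print(input_ids.numpy())       # [[1 2 3 0] [4 5 0 0]]
print(attention_mask.numpy())  # [[1 1 1 0] [1 1 0 0]]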
'''simple docstring''' import heapq import sys import numpy as np __UpperCAmelCase = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Optional[Any] = set() def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return len(self.elements ) == 0 def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any ): '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(lowerCamelCase_ ) else: # update # print("update", item) SCREAMING_SNAKE_CASE : Any = [] ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : Any = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : Optional[int] = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] ): '''simple docstring''' if item in self.set: self.set.remove(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = [] ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : Union[str, Any] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : Optional[int] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return self.elements[0][1] def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : Any = heapq.heappop(self.elements ) self.set.remove(lowerCamelCase_ ) return (priority, item) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = np.array(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(lowerCamelCase_ ) return np.linalg.norm(a - b ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return consistent_heuristic(lowerCamelCase_ , lowerCamelCase_ ) // t def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = g_function[start] + Wa * heuristics[i](lowerCamelCase_ , lowerCamelCase_ ) return ans def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = np.chararray((n, n) ) for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = """*""" for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): if (j, (n - 1) - i) in blocks: SCREAMING_SNAKE_CASE : List[str] = """#""" SCREAMING_SNAKE_CASE : Optional[int] = """-""" SCREAMING_SNAKE_CASE : List[str] = back_pointer[goal] while x != start: ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : int = x # print(x) SCREAMING_SNAKE_CASE : Dict = """-""" SCREAMING_SNAKE_CASE : Dict = back_pointer[x] SCREAMING_SNAKE_CASE : Any = """-""" for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: 
print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) SCREAMING_SNAKE_CASE : Tuple = back_pointer[goal] while x != start: print(lowerCamelCase_ , end=""" """ ) SCREAMING_SNAKE_CASE : Any = back_pointer[x] print(lowerCamelCase_ ) sys.exit() def __A ( lowerCamelCase_ ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ): """simple docstring""" for itera in range(lowerCamelCase_ ): open_list[itera].remove_element(lowerCamelCase_ ) # print("s", s) # print("j", j) ((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) : Any = s SCREAMING_SNAKE_CASE : Dict = (x - 1, y) SCREAMING_SNAKE_CASE : Dict = (x + 1, y) SCREAMING_SNAKE_CASE : List[str] = (x, y + 1) SCREAMING_SNAKE_CASE : int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowerCamelCase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = -1 SCREAMING_SNAKE_CASE : Union[str, Any] = float("""inf""" ) if valid(lowerCamelCase_ ) and g_function[neighbours] > g_function[s] + 1: SCREAMING_SNAKE_CASE : Tuple = g_function[s] + 1 SCREAMING_SNAKE_CASE : str = s if neighbours not in close_list_anchor: open_list[0].put(lowerCamelCase_ , key(lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowerCamelCase_ ): if key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) <= Wa * key( lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ): open_list[j].put( lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list __UpperCAmelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __UpperCAmelCase = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __UpperCAmelCase = make_common_ground() __UpperCAmelCase = blocks_blk # hyper parameters __UpperCAmelCase = 1 __UpperCAmelCase = 1 __UpperCAmelCase = 20 __UpperCAmelCase = 3 # one consistent and two other inconsistent # start and end destination __UpperCAmelCase = (0, 0) __UpperCAmelCase = (n - 1, n - 1) __UpperCAmelCase = 1 def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = {start: 0, goal: float("""inf""" )} SCREAMING_SNAKE_CASE : Tuple = {start: -1, goal: -1} SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Union[str, Any] = set() for i in range(lowerCamelCase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : 
list[int] = [] SCREAMING_SNAKE_CASE : list[int] = [] while open_list[0].minkey() < float("""inf""" ): for i in range(1 , lowerCamelCase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = open_list[i].top_show() visited.add(lowerCamelCase_ ) expand_state( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) close_list_inad.append(lowerCamelCase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Any = open_list[0].top_show() visited.add(lowerCamelCase_ ) expand_state( lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) close_list_anchor.append(lowerCamelCase_ ) print("""No path found to goal""" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowerCamelCase_ ): if (j, i) in blocks: print("""#""" , end=""" """ ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" , end=""" """ ) else: print("""-""" , end=""" """ ) else: print("""*""" , end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
323
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # A plain path defaults to the "train" split; a dict already maps splits.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
323
1
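The multi-heuristic A* code at the start of this row ranks each open list i by g(s) + W1 * h_i(s, goal), with heuristic 0 the consistent anchor that the termination bound Wa * minkey(0) is checked against. A minimal sketch of that key under the snippet's globals (W1, the heuristics dict); the parameter names here are assumptions, since the obfuscated source collapses them:

import math

W1 = 1  # anchor weight, matching the hyperparameter block in the snippet

def consistent_heuristic(p, goal):
    # Euclidean distance: consistent for unit-cost grid moves
    return math.hypot(p[0] - goal[0], p[1] - goal[1])

heuristics = {0: consistent_heuristic}  # index 0 must stay admissible

def key(start, i, goal, g_function):
    # open list i orders a state by path cost so far plus the weighted i-th heuristic
    return g_function[start] + W1 * heuristics[i](start, goal)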
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = KandinskyVaaPipeline SCREAMING_SNAKE_CASE__ = [ '''image_embeds''', '''negative_image_embeds''', ] SCREAMING_SNAKE_CASE__ = ['''image_embeds''', '''negative_image_embeds'''] SCREAMING_SNAKE_CASE__ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] SCREAMING_SNAKE_CASE__ = False @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return 32 @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return 1_00 @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase_ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Tuple = self.dummy_movq SCREAMING_SNAKE_CASE : int = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = 
{ """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any]=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCamelCase_ ) if str(lowerCamelCase_ ).startswith("""mps""" ): SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = """cpu""" SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = output.images SCREAMING_SNAKE_CASE : Optional[Any] = pipe( **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0] SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : str = np.array( [0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.to(lowerCamelCase_ ) pipeline.set_progress_bar_config(disable=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = """red cat, 4k photo""" SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = pipe_prior( lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() SCREAMING_SNAKE_CASE : int = 
torch.Generator(device="""cuda""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipeline( image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , output_type="""np""" , ) SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
323
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = """en_speaker_1""" SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string""" SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json""" SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings""" def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) SCREAMING_SNAKE_CASE : List[str] = 35 SCREAMING_SNAKE_CASE : List[Any] = 2 SCREAMING_SNAKE_CASE : int = 8 SCREAMING_SNAKE_CASE : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test 
loading voice preset from the hub SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string ) SCREAMING_SNAKE_CASE : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
323
1
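Read through the obfuscation, the slow test above chains the Kandinsky 2.2 prior and decoder. A sketch of the same flow with the public diffusers class names the test presumably maps to (KandinskyV22PriorPipeline, KandinskyV22Pipeline), using the checkpoint ids taken from the test itself:

import torch
from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

# The prior turns the prompt into CLIP image embeddings for the decoder.
image_embeds, negative_image_embeds = prior(
    "red cat, 4k photo", num_inference_steps=5, negative_prompt=""
).to_tuple()
image = decoder(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    num_inference_steps=100,
    output_type="np",
).images[0]  # (512, 512, 3) ndarray, as asserted in the test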
def optimal_merge_pattern(files: list) -> int:
    """Return the minimum total cost of merging all the files into one.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    >>> optimal_merge_pattern([5, 10, 20, 30, 30])
    205
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Merge the two cheapest files; the merge costs the sum of their sizes.
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
323
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCAmelCase = logging.getLogger(__name__) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]() SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels() SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , ) # Get datasets SCREAMING_SNAKE_CASE : Optional[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase_ ) -> Dict: SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )} # Data collator SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE : Any = Trainer( model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE : Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(lowerCamelCase_ ) return results def __A ( lowerCamelCase_ ): """simple docstring""" main() if __name__ == "__main__": main()
323
1
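A worked example for the greedy merge pattern above, assuming the cleaned-up optimal_merge_pattern definition is in scope: merging [2, 3, 4] combines 2+3 at cost 5, then 5+4 at cost 9, for a total of 14. Note the function consumes the list it is given, so pass a copy if the sizes are needed afterwards.

files = [2, 3, 4]
assert optimal_merge_pattern(list(files)) == 14  # 5 (2+3) + 9 (5+4)
assert files == [2, 3, 4]  # the copy was consumed, not the original
assert optimal_merge_pattern([10]) == 0  # a single file needs no merge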
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets __UpperCAmelCase = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ __UpperCAmelCase = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ __UpperCAmelCase = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[ """https://en.wikipedia.org/wiki/ROUGE_(metric)""", """https://github.com/google-research/google-research/tree/master/rouge""", ] , ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : int=None , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : int=False ): '''simple docstring''' if rouge_types is None: SCREAMING_SNAKE_CASE : Optional[Any] = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""] SCREAMING_SNAKE_CASE : List[str] = rouge_scorer.RougeScorer(rouge_types=lowerCamelCase_ , use_stemmer=lowerCamelCase_ ) if use_aggregator: SCREAMING_SNAKE_CASE : List[str] = scoring.BootstrapAggregator() else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = scorer.score(lowerCamelCase_ , lowerCamelCase_ ) if use_aggregator: aggregator.add_scores(lowerCamelCase_ ) else: scores.append(lowerCamelCase_ ) if use_aggregator: SCREAMING_SNAKE_CASE : Any = aggregator.aggregate() else: SCREAMING_SNAKE_CASE : Union[str, Any] = {} for key in scores[0]: SCREAMING_SNAKE_CASE : Union[str, Any] = [score[key] for score in scores] return result
323
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block SCREAMING_SNAKE_CASE : int = torch.nn.Convad( lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] ) # down SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Any = output_channel SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block( lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) self.down_blocks.append(lowerCamelCase_ ) # mid SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # out SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU() SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Tuple = False def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = x SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[Any] ): def custom_forward(*lowerCamelCase_ : List[str] ): return module(*lowerCamelCase_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , 
lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ ) else: # down for down_block in self.down_blocks: SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ ) # middle SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ ) # post-process SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : int = layers_per_block SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad( lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] ) SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None # mid SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , ) # up SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : List[Any] = get_up_block( lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , ) self.up_blocks.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = output_channel # out if norm_type == "spatial": SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 ) SCREAMING_SNAKE_CASE : Dict = nn.SiLU() SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 ) SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = z SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCamelCase_ : List[str] ): def custom_forward(*lowerCamelCase_ : str ): return module(*lowerCamelCase_ ) return custom_forward if 
is_torch_version(""">=""" , """1.11.0""" ): # middle SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ ) else: # middle SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ ) # up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ ) # post-process if latent_embeds is None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ ) return sample class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = n_e SCREAMING_SNAKE_CASE : int = vq_embed_dim SCREAMING_SNAKE_CASE : Tuple = beta SCREAMING_SNAKE_CASE : Union[str, Any] = legacy SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) SCREAMING_SNAKE_CASE : Optional[Any] = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0] SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed SCREAMING_SNAKE_CASE : Any = self.re_embed + 1 print( f'''Remapping {self.n_e} indices to {self.re_embed} indices. 
''' f'''Using {self.unknown_index} for unknown indices.''' ) else: SCREAMING_SNAKE_CASE : Optional[int] = n_e SCREAMING_SNAKE_CASE : Any = sane_index_shape def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 ) SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1 if self.unknown_index == "random": SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: SCREAMING_SNAKE_CASE : Any = self.unknown_index return new.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape assert len(lowerCamelCase_ ) > 1 SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 ) SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ ) if self.re_embed > self.used.shape[0]: # extra token SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ ) return back.reshape(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape ) SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : List[str] = None # compute loss for embedding if not self.legacy: SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach() # reshape back to match original input shape SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' if self.remap is not None: SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ ) if shape is not None: SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ ) # reshape back to 
match original input shape SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = parameters SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 ) SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 ) SCREAMING_SNAKE_CASE : Dict = deterministic SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar ) SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar ) if self.deterministic: SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = randn_tensor( self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample return x def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.mean
323
1
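The ROUGE wrapper above delegates scoring and bootstrap aggregation to Google's rouge_score package; a minimal standalone sketch of the path it takes when use_aggregator is True:

from rouge_score import rouge_scorer, scoring

predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]

scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
aggregator = scoring.BootstrapAggregator()
for ref, pred in zip(references, predictions):
    aggregator.add_scores(scorer.score(ref, pred))

result = aggregator.aggregate()
print(result["rouge1"].mid.fmeasure)  # 1.0, since predictions match references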
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """FIFO queue built from two LIFO stacks.

    >>> queue = QueueByTwoStacks([1, 2, 3])
    >>> queue.get()
    1
    >>> queue.put(4)
    >>> queue.get()
    2
    >>> len(queue)
    2
    """

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        # Refill the output stack only when it is empty, reversing the input
        # stack so the oldest element ends up on top.
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
323
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin

if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
323
1
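Usage of the two-stack queue above, assuming the cleaned-up QueueByTwoStacks definition is in scope: put() pushes onto the input stack, and get() lazily reverses it into the output stack, so each element is moved at most once and both operations are amortized O(1).

q = QueueByTwoStacks([10, 20, 30])
q.put(40)
assert len(q) == 4
assert q.get() == 10  # FIFO order despite the LIFO stacks
assert q.get() == 20
print(q)  # Queue((30, 40))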
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary digits, packed into an int.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-1a")
    -11010
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    if int_num == 0:
        return 0  # guard: the digit loop below would otherwise build int("")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
323
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ): '''simple docstring''' pass def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ ) import datasets SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) SCREAMING_SNAKE_CASE : Any = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , lowerCamelCase_ , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large""" SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] ) # This seems flaky. 
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
323
1
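Examples for the hex conversion above, assuming the cleaned-up hex_to_bin definition is in scope. Note the return value is the binary digit string packed into a Python int (e.g. 10101100), not a base-2 quantity.

assert hex_to_bin("AC") == 10101100       # 0xAC == 172 == 0b10101100
assert hex_to_bin("-1a") == -11010        # the sign is carried through
assert hex_to_bin("  12f  ") == 100101111 # surrounding whitespace is stripped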
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCAmelCase = False __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = """ybelkada/fonts""" def __A ( ): """simple docstring""" if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' """Pix2StructImageProcessor. Please upgrade torch.""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" requires_backends(lowerCamelCase_ , ["""torch"""] ) _check_torch_version() SCREAMING_SNAKE_CASE : Any = image_tensor.unsqueeze(0 ) SCREAMING_SNAKE_CASE : int = torch.nn.functional.unfold(lowerCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) SCREAMING_SNAKE_CASE : List[Any] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowerCamelCase_ , lowerCamelCase_ , -1 ) SCREAMING_SNAKE_CASE : Any = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def __A ( lowerCamelCase_ , lowerCamelCase_ = 36 , lowerCamelCase_ = "black" , lowerCamelCase_ = "white" , lowerCamelCase_ = 5 , lowerCamelCase_ = 5 , lowerCamelCase_ = 5 , lowerCamelCase_ = 5 , lowerCamelCase_ = None , lowerCamelCase_ = None , ): """simple docstring""" requires_backends(lowerCamelCase_ , """vision""" ) # Add new lines so that each line is no more than 80 characters. SCREAMING_SNAKE_CASE : Optional[Any] = textwrap.TextWrapper(width=80 ) SCREAMING_SNAKE_CASE : Tuple = wrapper.wrap(text=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = """\n""".join(lowerCamelCase_ ) if font_bytes is not None and font_path is None: SCREAMING_SNAKE_CASE : List[str] = io.BytesIO(lowerCamelCase_ ) elif font_path is not None: SCREAMING_SNAKE_CASE : List[str] = font_path else: SCREAMING_SNAKE_CASE : Dict = hf_hub_download(lowerCamelCase_ , """Arial.TTF""" ) SCREAMING_SNAKE_CASE : Tuple = ImageFont.truetype(lowerCamelCase_ , encoding="""UTF-8""" , size=lowerCamelCase_ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. SCREAMING_SNAKE_CASE : int = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = temp_draw.textbbox((0, 0) , lowerCamelCase_ , lowerCamelCase_ ) # Create the actual image with a bit of padding around the text. 
SCREAMING_SNAKE_CASE : Union[str, Any] = text_width + left_padding + right_padding SCREAMING_SNAKE_CASE : Any = text_height + top_padding + bottom_padding SCREAMING_SNAKE_CASE : List[str] = Image.new("""RGB""" , (image_width, image_height) , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = ImageDraw.Draw(lowerCamelCase_ ) draw.text(xy=(left_padding, top_padding) , text=lowerCamelCase_ , fill=lowerCamelCase_ , font=lowerCamelCase_ ) return image def __A ( lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ): """simple docstring""" requires_backends(lowerCamelCase_ , """vision""" ) # Convert to PIL image if necessary SCREAMING_SNAKE_CASE : List[str] = to_pil_image(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = render_text(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = max(header_image.width , image.width ) SCREAMING_SNAKE_CASE : int = int(image.height * (new_width / image.width) ) SCREAMING_SNAKE_CASE : Tuple = int(header_image.height * (new_width / header_image.width) ) SCREAMING_SNAKE_CASE : Optional[int] = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary SCREAMING_SNAKE_CASE : List[str] = to_numpy_array(lowerCamelCase_ ) if infer_channel_dimension_format(lowerCamelCase_ ) == ChannelDimension.LAST: SCREAMING_SNAKE_CASE : Optional[Any] = to_channel_dimension_format(lowerCamelCase_ , ChannelDimension.LAST ) return new_image class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ['''flattened_patches'''] def __init__( self : str , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : int = 20_48 , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Any , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} SCREAMING_SNAKE_CASE : Optional[int] = do_normalize SCREAMING_SNAKE_CASE : int = do_convert_rgb SCREAMING_SNAKE_CASE : Any = max_patches SCREAMING_SNAKE_CASE : Optional[Any] = is_vqa def lowerCamelCase_ ( self : str , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : int , lowerCamelCase_ : dict , **lowerCamelCase_ : str ): '''simple docstring''' requires_backends(self.extract_flattened_patches , """torch""" ) _check_torch_version() # convert to torch SCREAMING_SNAKE_CASE : str = to_channel_dimension_format(lowerCamelCase_ , ChannelDimension.FIRST ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = patch_size["""height"""], patch_size["""width"""] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = get_image_size(lowerCamelCase_ ) # maximize scale s.t. 
SCREAMING_SNAKE_CASE : List[Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) SCREAMING_SNAKE_CASE : Union[str, Any] = max(min(math.floor(scale * image_height / patch_height ) , lowerCamelCase_ ) , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = max(min(math.floor(scale * image_width / patch_width ) , lowerCamelCase_ ) , 1 ) SCREAMING_SNAKE_CASE : List[str] = max(num_feasible_rows * patch_height , 1 ) SCREAMING_SNAKE_CASE : List[Any] = max(num_feasible_cols * patch_width , 1 ) SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=lowerCamelCase_ , antialias=lowerCamelCase_ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE : Tuple = torch_extract_patches(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = patches.shape SCREAMING_SNAKE_CASE : List[str] = patches_shape[1] SCREAMING_SNAKE_CASE : Optional[Any] = patches_shape[2] SCREAMING_SNAKE_CASE : Union[str, Any] = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE : List[Any] = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] SCREAMING_SNAKE_CASE : Dict = torch.arange(lowerCamelCase_ ).reshape([rows, 1] ).repeat(1 , lowerCamelCase_ ).reshape([rows * columns, 1] ) SCREAMING_SNAKE_CASE : int = torch.arange(lowerCamelCase_ ).reshape([1, columns] ).repeat(lowerCamelCase_ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] SCREAMING_SNAKE_CASE : List[Any] = row_ids.to(torch.floataa ) SCREAMING_SNAKE_CASE : int = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE : int = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE : List[Any] = torch.nn.functional.pad(lowerCamelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float() SCREAMING_SNAKE_CASE : List[Any] = to_numpy_array(lowerCamelCase_ ) return result def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' if image.dtype == np.uinta: SCREAMING_SNAKE_CASE : Tuple = image.astype(np.floataa ) # take mean across the whole `image` SCREAMING_SNAKE_CASE : Optional[int] = np.mean(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.std(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = max(lowerCamelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : ImageInput , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[Dict[str, int]] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else 
self.do_convert_rgb SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size if patch_size is not None else self.patch_size SCREAMING_SNAKE_CASE : List[Any] = max_patches if max_patches is not None else self.max_patches SCREAMING_SNAKE_CASE : Any = self.is_vqa if kwargs.get("""data_format""" , lowerCamelCase_ ) is not None: raise ValueError("""data_format is not an accepted input as the outputs are """ ) SCREAMING_SNAKE_CASE : Tuple = make_list_of_images(lowerCamelCase_ ) if not valid_images(lowerCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE : List[Any] = [convert_to_rgb(lowerCamelCase_ ) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE : Any = [to_numpy_array(lowerCamelCase_ ) for image in images] if is_vqa: if header_text is None: raise ValueError("""A header text must be provided for VQA models.""" ) SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("""font_bytes""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""font_path""" , lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : str = [header_text] * len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = [ render_header(lowerCamelCase_ , header_text[i] , font_bytes=lowerCamelCase_ , font_path=lowerCamelCase_ ) for i, image in enumerate(lowerCamelCase_ ) ] if do_normalize: SCREAMING_SNAKE_CASE : Union[str, Any] = [self.normalize(image=lowerCamelCase_ ) for image in images] # convert to torch tensor and permute SCREAMING_SNAKE_CASE : str = [ self.extract_flattened_patches(image=lowerCamelCase_ , max_patches=lowerCamelCase_ , patch_size=lowerCamelCase_ ) for image in images ] # create attention mask in numpy SCREAMING_SNAKE_CASE : List[Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature( data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=lowerCamelCase_ ) return encoded_outputs
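# Illustrative sketch (not part of the processor above): the windowing that
# `torch_extract_patches` is assumed to perform can be reproduced with
# `torch.nn.functional.unfold`, which slides a non-overlapping
# (patch_height, patch_width) window over the image and flattens each window
# into one row. The exact element order inside a patch may differ from the real
# helper; only the patch-grid layout is illustrated here.
import torch


def extract_patches_sketch(image: torch.Tensor, patch_height: int, patch_width: int) -> torch.Tensor:
    """Return patches of shape [1, rows, columns, channels * patch_height * patch_width]."""
    channels, height, width = image.shape
    # unfold expects a batch dimension and returns [1, channels * ph * pw, rows * columns]
    unfolded = torch.nn.functional.unfold(
        image.unsqueeze(0), kernel_size=(patch_height, patch_width), stride=(patch_height, patch_width)
    )
    rows, columns = height // patch_height, width // patch_width
    return unfolded.transpose(1, 2).reshape(1, rows, columns, channels * patch_height * patch_width)


# e.g. a 3x32x32 image with 16x16 patches yields a 2x2 grid of 768-dim patches:
# extract_patches_sketch(torch.rand(3, 32, 32), 16, 16).shape == (1, 2, 2, 768)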
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE : List[Any] = image_mean SCREAMING_SNAKE_CASE : str = image_std def lowerCamelCase_ ( self : Any ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
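# A minimal sketch of the helper the tests above rely on: `prepare_image_inputs`
# is assumed to build a batch of random images whose sides fall between
# min_resolution and max_resolution (shown here only for three-channel PIL
# images; the real helper can also return numpy or torch inputs).
import numpy as np
from PIL import Image


def prepare_image_inputs_sketch(batch_size, num_channels, min_resolution, max_resolution):
    images = []
    for _ in range(batch_size):
        height = np.random.randint(min_resolution, max_resolution + 1)
        width = np.random.randint(min_resolution, max_resolution + 1)
        array = np.random.randint(0, 256, (height, width, num_channels), dtype=np.uint8)
        images.append(Image.fromarray(array))
    return images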
'''simple docstring''' import argparse import copy def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = {} with open(lowerCamelCase_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: SCREAMING_SNAKE_CASE : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) SCREAMING_SNAKE_CASE : Optional[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: SCREAMING_SNAKE_CASE : Union[str, Any] = [] _list.append([line.split()[0], line.split()[2]] ) SCREAMING_SNAKE_CASE : Optional[int] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: SCREAMING_SNAKE_CASE : List[Any] = f.read(1 ) SCREAMING_SNAKE_CASE : Dict = start_node SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = start_node SCREAMING_SNAKE_CASE : str = 0 while visiting not in first_solution: SCREAMING_SNAKE_CASE : Any = 1_00_00 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(lowerCamelCase_ ) and k[0] not in first_solution: SCREAMING_SNAKE_CASE : Tuple = k[1] SCREAMING_SNAKE_CASE : str = k[0] first_solution.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = distance_of_first_solution + int(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = best_node first_solution.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 SCREAMING_SNAKE_CASE : List[Any] = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_00_00 ) return first_solution, distance_of_first_solution def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [] for n in solution[1:-1]: SCREAMING_SNAKE_CASE : str = solution.index(lowerCamelCase_ ) for kn in solution[1:-1]: SCREAMING_SNAKE_CASE : str = solution.index(lowerCamelCase_ ) if n == kn: continue SCREAMING_SNAKE_CASE : int = copy.deepcopy(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = kn SCREAMING_SNAKE_CASE : List[Any] = n SCREAMING_SNAKE_CASE : Optional[int] = 0 for k in _tmp[:-1]: SCREAMING_SNAKE_CASE : Dict = _tmp[_tmp.index(lowerCamelCase_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: SCREAMING_SNAKE_CASE : Optional[int] = distance + int(i[1] ) _tmp.append(lowerCamelCase_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) SCREAMING_SNAKE_CASE : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda lowerCamelCase_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : Union[str, Any] = first_solution SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = distance_of_first_solution SCREAMING_SNAKE_CASE : List[Any] = solution while count <= iters: SCREAMING_SNAKE_CASE : str = find_neighborhood(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : List[Any] = neighborhood[index_of_best_solution] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 SCREAMING_SNAKE_CASE : int = False while not found: SCREAMING_SNAKE_CASE : Any = 0 while i < 
len(lowerCamelCase_ ): if best_solution[i] != solution[i]: SCREAMING_SNAKE_CASE : int = best_solution[i] SCREAMING_SNAKE_CASE : Dict = solution[i] break SCREAMING_SNAKE_CASE : Union[str, Any] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) SCREAMING_SNAKE_CASE : Dict = True SCREAMING_SNAKE_CASE : Union[str, Any] = best_solution[:-1] SCREAMING_SNAKE_CASE : List[Any] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: SCREAMING_SNAKE_CASE : Tuple = cost SCREAMING_SNAKE_CASE : str = solution else: SCREAMING_SNAKE_CASE : Dict = index_of_best_solution + 1 SCREAMING_SNAKE_CASE : str = neighborhood[index_of_best_solution] if len(lowerCamelCase_ ) >= size: tabu_list.pop(0 ) SCREAMING_SNAKE_CASE : Dict = count + 1 return best_solution_ever, best_cost def __A ( lowerCamelCase_=None ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = generate_neighbours(args.File ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = generate_first_solution( args.File , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = tabu_search( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
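# Self-contained sketch of the move used by `find_neighborhood` above: each
# neighbor of a tour is produced by exchanging two interior cities, and the tabu
# list simply remembers recent swaps so they are not immediately undone.
def two_swap_neighbors(tour):
    """Yield every tour obtained by swapping two interior cities of `tour`."""
    for i in range(1, len(tour) - 1):
        for j in range(i + 1, len(tour) - 1):
            neighbor = list(tour)
            neighbor[i], neighbor[j] = neighbor[j], neighbor[i]
            yield neighbor


# e.g. list(two_swap_neighbors(["a", "b", "c", "a"])) == [["a", "c", "b", "a"]]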
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    """config""": [
        """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
        """OnnxConfig""",
        """OnnxConfigWithPast""",
        """OnnxSeq2SeqConfigWithPast""",
        """PatchingSpec""",
    ],
    """convert""": ["""export""", """validate_model_outputs"""],
    """features""": ["""FeaturesManager"""],
    """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Register a lazy module so submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
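# Minimal sketch of the lazy-module pattern above, independent of the real
# `_LazyModule`: a module subclass that imports a submodule the first time one
# of its exported names is looked up. Defining the class is runnable; actually
# installing it requires a real package around it.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that defines it
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value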
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = [ """word_embeddings_layernorm.weight""", """word_embeddings_layernorm.bias""", """input_layernorm.weight""", """input_layernorm.bias""", """post_attention_layernorm.weight""", """post_attention_layernorm.bias""", """self_attention.dense.bias""", """mlp.dense_4h_to_h.bias""", """ln_f.weight""", """ln_f.bias""", ] __UpperCAmelCase = [ """mlp.dense_4h_to_h.weight""", """self_attention.dense.weight""", ] def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = { """word_embeddings.weight""": """word_embeddings.weight""", """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""", """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""", """weight""": """ln_f.weight""", """bias""": """ln_f.bias""", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks SCREAMING_SNAKE_CASE : Dict = int(re.match(R""".*layer_(\d*).*""" , lowerCamelCase_ )[1] ) layer_number -= 3 return f'''h.{layer_number}.''' + key def __A ( lowerCamelCase_ ): """simple docstring""" if dtype == torch.bool: return 1 / 8 SCREAMING_SNAKE_CASE : str = re.search(R"""[^\d](\d+)$""" , str(lowerCamelCase_ ) ) if bit_search is None: raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' ) SCREAMING_SNAKE_CASE : int = int(bit_search.groups()[0] ) return bit_size // 8 def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if bloom_config_file == "": SCREAMING_SNAKE_CASE : int = BloomConfig() else: SCREAMING_SNAKE_CASE : Any = BloomConfig.from_json_file(lowerCamelCase_ ) if shard_model: SCREAMING_SNAKE_CASE : Union[str, Any] = os.listdir(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = sorted(filter(lambda lowerCamelCase_ : s.startswith("""layer""" ) and "model_00" in s , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = {"""weight_map""": {}, """metadata""": {}} SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : Dict = BloomConfig() for j, file in enumerate(lowerCamelCase_ ): print("""Processing file: {}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : List[str] = None for i in range(lowerCamelCase_ ): # load all TP files SCREAMING_SNAKE_CASE : Union[str, Any] = file.replace("""model_00""" , f'''model_0{i}''' ) SCREAMING_SNAKE_CASE : Optional[int] = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location="""cpu""" ) # Rename keys in the transformers names SCREAMING_SNAKE_CASE : str = list(temp.keys() ) for key in keys: SCREAMING_SNAKE_CASE : Optional[Any] = temp.pop(lowerCamelCase_ ) if tensors is None: SCREAMING_SNAKE_CASE : Tuple = temp else: for key in tensors.keys(): if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel SCREAMING_SNAKE_CASE : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks SCREAMING_SNAKE_CASE : int 
= torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): SCREAMING_SNAKE_CASE : Dict = tensors[key] / pretraining_tp torch.save( lowerCamelCase_ , os.path.join( lowerCamelCase_ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): SCREAMING_SNAKE_CASE : List[Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: SCREAMING_SNAKE_CASE : Optional[Any] = """pytorch_model_{}-of-{}.bin""".format( str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) ) SCREAMING_SNAKE_CASE : List[Any] = BloomConfig() SCREAMING_SNAKE_CASE : List[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME SCREAMING_SNAKE_CASE : int = total_size with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) with open(os.path.join(lowerCamelCase_ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE : Any = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + """\n""" f.write(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : str = BloomModel(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = os.listdir(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = sorted(filter(lambda lowerCamelCase_ : s.startswith("""layer""" ) and "model_00" in s , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = None for i, file in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = None for i in range(lowerCamelCase_ ): # load all TP files SCREAMING_SNAKE_CASE : Tuple = file.replace("""model_00""" , f'''model_0{i}''' ) SCREAMING_SNAKE_CASE : int = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location="""cpu""" ) # Rename keys in the transformers names SCREAMING_SNAKE_CASE : Dict = list(temp.keys() ) for key in keys: SCREAMING_SNAKE_CASE : List[str] = temp.pop(lowerCamelCase_ ) if tensors is None: SCREAMING_SNAKE_CASE : Tuple = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel SCREAMING_SNAKE_CASE : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks SCREAMING_SNAKE_CASE : Any = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): SCREAMING_SNAKE_CASE : Union[str, Any] = tensors[key] / pretraining_tp SCREAMING_SNAKE_CASE : Optional[int] = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: SCREAMING_SNAKE_CASE : int = set(other_keys.missing_keys ) else: SCREAMING_SNAKE_CASE : str = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f'''The keys {missing_keys} are missing''' # Save pytorch-model 
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME SCREAMING_SNAKE_CASE : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: SCREAMING_SNAKE_CASE : int = model.to(config.torch_dtype ) torch.save(model.state_dict() , lowerCamelCase_ ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bloom_checkpoint_path""", default=None, type=str, required=True, help="""Path to the Megatron-LM checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--bloom_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--shard_model""", action="""store_true""", help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""", ) parser.add_argument( """--pretraining_tp""", default=4, type=int, help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""", ) __UpperCAmelCase = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
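# Condensed sketch of the tensor-parallel merge rule applied above, with suffix
# lists abbreviated from WEIGHTS_TO_AVERAGE_ENDSWITH and
# WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN: replicated weights are averaged across
# TP ranks, while sharded linear weights are concatenated (dim 1 for the
# row-parallel weights, dim 0 otherwise).
import torch

AVERAGED_SUFFIXES = ("layernorm.weight", "layernorm.bias", "dense.bias", "dense_4h_to_h.bias", "ln_f.weight", "ln_f.bias")
ROW_PARALLEL_MARKERS = ("mlp.dense_4h_to_h.weight", "self_attention.dense.weight")


def merge_tp_shards(key, shards, pretraining_tp):
    if key.endswith(AVERAGED_SUFFIXES):
        return torch.stack(shards).sum(dim=0) / pretraining_tp
    cat_dim = 1 if any(marker in key for marker in ROW_PARALLEL_MARKERS) else 0
    return torch.cat(shards, dim=cat_dim)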
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __A ( lowerCamelCase_ ): """simple docstring""" return x + 2 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = """x = 3""" SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) assert result == 3 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3} ) SCREAMING_SNAKE_CASE : Union[str, Any] = """x = y""" SCREAMING_SNAKE_CASE : List[str] = {"""y""": 5} SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowerCamelCase_ , {"""x""": 5, """y""": 5} ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = """y = add_two(x)""" SCREAMING_SNAKE_CASE : Optional[int] = {"""x""": 3} SCREAMING_SNAKE_CASE : str = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ ) assert result == 5 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) assert result is None assert "tried to execute add_two" in out.out def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = """x = 3""" SCREAMING_SNAKE_CASE : List[str] = {} SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) assert result == 3 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3} ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = """test_dict = {'x': x, 'y': add_two(x)}""" SCREAMING_SNAKE_CASE : Any = {"""x""": 3} SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ ) self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = """x = 3\ny = 5""" SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Dict = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 5} ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = """text = f'This is x: {x}.'""" SCREAMING_SNAKE_CASE : Dict = {"""x""": 3} SCREAMING_SNAKE_CASE : List[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """text""": """This is x: 3."""} ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5""" SCREAMING_SNAKE_CASE : Any = {"""x""": 3} SCREAMING_SNAKE_CASE : List[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) # evaluate returns the value of the last assignment. 
assert result == 2 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 2} ) SCREAMING_SNAKE_CASE : Optional[int] = {"""x""": 8} SCREAMING_SNAKE_CASE : Optional[int] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowerCamelCase_ , {"""x""": 8, """y""": 5} ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = """test_list = [x, add_two(x)]""" SCREAMING_SNAKE_CASE : List[str] = {"""x""": 3} SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , [3, 5] ) self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_list""": [3, 5]} ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = """y = x""" SCREAMING_SNAKE_CASE : Any = {"""x""": 3} SCREAMING_SNAKE_CASE : Dict = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ ) assert result == 3 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 3} ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = """test_list = [x, add_two(x)]\ntest_list[1]""" SCREAMING_SNAKE_CASE : int = {"""x""": 3} SCREAMING_SNAKE_CASE : Tuple = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ ) assert result == 5 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_list""": [3, 5]} ) SCREAMING_SNAKE_CASE : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" SCREAMING_SNAKE_CASE : str = {"""x""": 3} SCREAMING_SNAKE_CASE : Dict = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ ) assert result == 5 self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = """x = 0\nfor i in range(3):\n x = i""" SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : Tuple = evaluate(lowerCamelCase_ , {"""range""": range} , state=lowerCamelCase_ ) assert result == 2 self.assertDictEqual(lowerCamelCase_ , {"""x""": 2, """i""": 2} )
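# Toy stand-in for the contract the tests above exercise: `evaluate` runs a code
# snippet against a whitelist of tools, mutates `state` in place, and returns the
# value of the last statement. This sketch only handles expressions and simple
# `name = ...` assignments, and unlike the real interpreter it does not sandbox
# builtins or reject non-whitelisted calls.
import ast


def evaluate_sketch(code, tools, state):
    result = None
    for node in ast.parse(code).body:
        value = eval(compile(ast.Expression(node.value), "<snippet>", "eval"), dict(tools), state)
        if isinstance(node, ast.Assign):
            for target in node.targets:
                state[target.id] = value
        result = value
    return result


# evaluate_sketch("x = 3\ny = add_two(x)", {"add_two": lambda v: v + 2}, {}) -> 5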
'''simple docstring'''


def set_bit(number, position):
    """simple docstring"""
    # Set the bit at `position` (counted from the least-significant bit) to 1.
    return number | (1 << position)


def clear_bit(number, position):
    """simple docstring"""
    # Set the bit at `position` to 0.
    return number & ~(1 << position)


def flip_bit(number, position):
    """simple docstring"""
    # Toggle the bit at `position`.
    return number ^ (1 << position)


def is_bit_set(number, position):
    """simple docstring"""
    # True if the bit at `position` is 1.
    return ((number >> position) & 1) == 1


def get_bit(number, position):
    """simple docstring"""
    # Return the bit at `position` as 0 or 1.
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
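# Worked examples for the helpers above (0b1101 == 13):
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1101, 2)  -> 0b1001 (9)
#   flip_bit(0b1101, 1)   -> 0b1111 (15)
#   is_bit_set(0b1101, 2) -> True
#   get_bit(0b1101, 0)    -> 1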
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __UpperCAmelCase = """.""" # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __UpperCAmelCase = [ """Assert""", """AssignVariableOp""", """EmptyTensorList""", """MergeV2Checkpoints""", """ReadVariableOp""", """ResourceGather""", """RestoreV2""", """SaveV2""", """ShardedFilename""", """StatefulPartitionedCall""", """StaticRegexFullMatch""", """VarHandleOp""", ] def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = SavedModel() SCREAMING_SNAKE_CASE : Tuple = [] with open(os.path.join(lowerCamelCase_ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE : Optional[int] = json.load(lowerCamelCase_ )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(lowerCamelCase_ )] ) with open(lowerCamelCase_ , """rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE : Optional[int] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(lowerCamelCase_ ) if strict and len(lowerCamelCase_ ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(lowerCamelCase_ ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*lowerCamelCase_ , sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""") parser.add_argument( """--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested.""" ) parser.add_argument( """--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model.""" ) parser.add_argument( """--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)""" ) __UpperCAmelCase = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
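# Example invocation (script and model paths are illustrative):
#   python utils/check_tf_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict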
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Any = seq_length SCREAMING_SNAKE_CASE : List[str] = is_training SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = rotary_dim SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE : str = vocab_size - 1 SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1 def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": 
input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 20 SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : str = model( input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = 20 SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE : Any = model( input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE : Dict = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) @require_flax class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @tooslow def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE : str = jax.jit(model.generate ) SCREAMING_SNAKE_CASE : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
fx_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = 1 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple() self.assertEqual( len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ )
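# A self-contained look at the position-id construction used by the cache tests
# above: a broadcast arange over all but the last prompt token, one row per
# batch element (batch of 2, prompt length 4 shown).
if __name__ == "__main__":
    import jax.numpy as jnp

    position_ids = jnp.broadcast_to(jnp.arange(3)[None, :], (2, 3))
    print(position_ids)  # [[0 1 2] [0 1 2]]; the final token then decodes at position 3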
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''git_vision_model''' def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : int = intermediate_size SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = patch_size SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[str] = initializer_range SCREAMING_SNAKE_CASE : str = attention_dropout SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : List[str] = hidden_act @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ): '''simple docstring''' cls._set_token_in_kwargs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''git''' def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ): '''simple docstring''' super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ ) if vision_config is None: SCREAMING_SNAKE_CASE : Any = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = vocab_size SCREAMING_SNAKE_CASE : Tuple = hidden_size SCREAMING_SNAKE_CASE : int = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : str = hidden_act SCREAMING_SNAKE_CASE : Dict = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings SCREAMING_SNAKE_CASE : int = num_image_with_embedding SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id SCREAMING_SNAKE_CASE : str = eos_token_id def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
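# Minimal sketch (hypothetical class names) of the nested-config pattern used
# above: the composite config owns a typed vision sub-config, falls back to
# defaults when none is given, and `to_dict` serializes the sub-config back to
# a plain dict, mirroring the GIT config / vision-config pair.
class VisionConfigSketch:
    def __init__(self, image_size=2_24, patch_size=16):
        self.image_size = image_size
        self.patch_size = patch_size


class CompositeConfigSketch:
    def __init__(self, vision_config=None):
        # fall back to default vision settings when no sub-config dict is given
        self.vision_config = VisionConfigSketch(**(vision_config or {}))

    def to_dict(self):
        output = dict(self.__dict__)
        output["vision_config"] = dict(self.vision_config.__dict__)
        return output


# CompositeConfigSketch({"image_size": 3_84}).to_dict()["vision_config"]["image_size"] == 384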
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Optional[int] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim SCREAMING_SNAKE_CASE : Tuple = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : str = ( nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE : Any = GPTaConfig( vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ): '''simple 
docstring'''
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        '''Generate captions for a batch of encoded prefix features via beam search.'''
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        '''Beam-search decode until `eos_token_id` is produced or `entry_length` steps elapse.'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                # First step: seed the beams with the top-k candidate tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams may only extend with token 0 at zero additional cost.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(
                generated.shape[0], 1, -1
            )
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        # Rank the finished beams by length-normalized log-probability.
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
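# A minimal usage sketch for the decoder methods above. This is an assumption-laden
# illustration, not part of this file: `decoder` stands for an instance of this
# text-decoder class, the 768-wide CLIP feature and the GPT-2 eos id (50256) are
# guesses at typical values.
#
#     clip_features = torch.randn(2, 768)                       # one row per sample
#     prefix = decoder.encode(clip_features)                    # project into prefix space
#     tokens, lengths = decoder.generate_captions(prefix, eos_token_id=50256, device="cpu")
#     # tokens: (2, n) padded token ids; lengths: effective caption length per sample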
"""
Problem 16: https://projecteuler.net/problem=16

2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""


def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        # Peel off the last digit and accumulate it.
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
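# A quick worked example for `solution` above, matching the problem statement:
# 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26. A minimal self-check sketch, assuming
# the file is imported as a module rather than run as a script:
#
#     assert solution(15) == 26
#     assert solution() == solution(1000)   # the default power is 1000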
"""GIT model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for a GIT model (text decoder plus nested vision config)."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize this instance to a dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
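# A minimal usage sketch for the two configs above. This is an illustration, not part
# of the original file; the overridden values are arbitrary:
#
#     vision = GitVisionConfig(image_size=384)            # override one vision default
#     config = GitConfig(vision_config=vision.to_dict())  # __init__ re-wraps the dict
#     assert config.vision_config.image_size == 384
#     assert config.to_dict()["model_type"] == "git"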