Dataset schema (from the dataset viewer's column summary):

- code: string (lengths 87 to 55.2k characters)
- code_codestyle: int64 (0 to 349)
- style_context: string (lengths 135 to 49.1k characters)
- style_context_codestyle: int64 (0 to 349)
- label: int64 (0 to 1)
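Each row pairs a `code` sample with a `style_context` sample, each tagged with a codestyle ID, plus a binary label. A minimal loading sketch follows; note that the dataset's Hub ID is not given anywhere in this dump, so "user/code-style-pairs" below is a hypothetical placeholder, not the real identifier:

    from datasets import load_dataset

    # "user/code-style-pairs" is a hypothetical placeholder ID; substitute the real one.
    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])  # e.g. 15 15 1
    print(row["code"][:200])  # the code samples are stored as flattened single-line strings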
Row 1, code (code_codestyle = 15):

from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()

Row 1, style_context (style_context_codestyle = 15):
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest eigenvalue and corresponding eigenvector of input_matrix."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy's eigh on real and complex inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
label: 1

Row 2, code (code_codestyle = 15):
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

Row 2, style_context (style_context_codestyle = 15):
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
label: 1

Row 3, code (code_codestyle = 15):
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")

Row 3, style_context (style_context_codestyle = 15):
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
label: 1

Row 4, code (code_codestyle = 15):
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor

Row 4, style_context (style_context_codestyle = 15):
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
label: 1

Row 5, code (code_codestyle = 15):
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric("mean_iou")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
"""

_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""


def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result

Row 5, style_context (style_context_codestyle = 15):
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
label: 1

Row 6, code (code_codestyle = 15):
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Project Euler 173: count the square laminae that can be formed using up to `limit` tiles."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")

Row 6, style_context (style_context_codestyle = 15):
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
label: 1

Row 7, code (code_codestyle = 15):
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    snake_case_ = (DDIMParallelScheduler,)
    snake_case_ = (("eta", 0.0), ("num_inference_steps", 50))

    def UpperCamelCase_ ( self : Dict ,**A : List[Any] ):
        __A = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**A )
        return config

    def UpperCamelCase_ ( self : Union[str, Any] ,**A : Union[str, Any] ):
        __A = self.scheduler_classes[0]
        __A = self.get_scheduler_config(**A )
        __A = scheduler_class(**A )
        __A , __A = 10, 0.0
        __A = self.dummy_model()
        __A = self.dummy_sample_deter
        scheduler.set_timesteps(A )
        for t in scheduler.timesteps:
            __A = model(A ,A )
            __A = scheduler.step(A ,A ,A ,A ).prev_sample
        return sample

    def UpperCamelCase_ ( self : Dict ):
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=A )

    def UpperCamelCase_ ( self : Optional[int] ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=A )
        __A = self.scheduler_classes[0]
        __A = self.get_scheduler_config(steps_offset=1 )
        __A = scheduler_class(**A )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )

    def UpperCamelCase_ ( self : Dict ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=A ,beta_end=A )

    def UpperCamelCase_ ( self : Any ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=A )

    def UpperCamelCase_ ( self : Optional[int] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=A )

    def UpperCamelCase_ ( self : Any ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=A )

    def UpperCamelCase_ ( self : Any ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=A )

    def UpperCamelCase_ ( self : List[str] ):
        self.check_over_configs(thresholding=A )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=A ,prediction_type=A ,sample_max_value=A ,)

    def UpperCamelCase_ ( self : Any ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=A )

    def UpperCamelCase_ ( self : List[Any] ):
        for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 5_00] ):
            self.check_over_forward(time_step=A ,num_inference_steps=A )

    def UpperCamelCase_ ( self : Dict ):
        for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=A ,eta=A )

    def UpperCamelCase_ ( self : Optional[int] ):
        __A = self.scheduler_classes[0]
        __A = self.get_scheduler_config()
        __A = scheduler_class(**A )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20 ,4_00 ) - 0.1_47_71 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80 ,9_60 ) - 0.3_24_60 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ,4_86 ) - 0.0_09_79 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ,9_98 ) - 0.02 ) ) < 1E-5

    def UpperCamelCase_ ( self : List[Any] ):
        __A = self.scheduler_classes[0]
        __A = self.get_scheduler_config()
        __A = scheduler_class(**A )
        __A , __A = 10, 0.0
        scheduler.set_timesteps(A )
        __A = self.dummy_model()
        __A = self.dummy_sample_deter
        __A = self.dummy_sample_deter + 0.1
        __A = self.dummy_sample_deter - 0.1
        __A = samplea.shape[0]
        __A = torch.stack([samplea, samplea, samplea] ,dim=0 )
        __A = torch.arange(A )[0:3, None].repeat(1 ,A )
        __A = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
        __A = scheduler.batch_step_no_noise(A ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,A )
        __A = torch.sum(torch.abs(A ) )
        __A = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
        assert abs(result_mean.item() - 0.49_82 ) < 1E-3

    def UpperCamelCase_ ( self : List[Any] ):
        __A = self.full_loop()
        __A = torch.sum(torch.abs(A ) )
        __A = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3

    def UpperCamelCase_ ( self : Tuple ):
        __A = self.full_loop(prediction_type="v_prediction" )
        __A = torch.sum(torch.abs(A ) )
        __A = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 52.53_02 ) < 1E-2
        assert abs(result_mean.item() - 0.06_84 ) < 1E-3

    def UpperCamelCase_ ( self : Union[str, Any] ):
        # We specify different beta, so that the first alpha is 0.99
        __A = self.full_loop(set_alpha_to_one=A ,beta_start=0.01 )
        __A = torch.sum(torch.abs(A ) )
        __A = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
        assert abs(result_mean.item() - 0.19_51 ) < 1E-3

    def UpperCamelCase_ ( self : Any ):
        # We specify different beta, so that the first alpha is 0.99
        __A = self.full_loop(set_alpha_to_one=A ,beta_start=0.01 )
        __A = torch.sum(torch.abs(A ) )
        __A = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
        assert abs(result_mean.item() - 0.19_41 ) < 1E-3
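# Context for the variance asserts above: DDIM's posterior variance under the
# test's linear beta schedule (beta_start=0.0001, beta_end=0.02, 1000 steps).
# A minimal numpy sketch with my own names; it mirrors the standard formula
# var = (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev), not the scheduler's code.
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)

def ddim_variance(t: int, prev_t: int) -> float:
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[prev_t] if prev_t >= 0 else 1.0
    return (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)

assert abs(ddim_variance(420, 400) - 0.14771) < 1e-4  # matches the assert above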
15
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def UpperCAmelCase ( a_ ) -> str:
    """simple docstring"""
    __A = {}
    __A = job["started_at"]
    __A = job["completed_at"]
    __A = date_parser.parse(a_ )
    __A = date_parser.parse(a_ )
    __A = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    __A = start
    __A = end
    __A = duration_in_min
    return job_info


def UpperCAmelCase ( a_ , a_=None ) -> str:
    """simple docstring"""
    __A = None
    if token is not None:
        __A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    __A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    __A = requests.get(a_ , headers=a_ ).json()
    __A = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
        __A = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
        for i in range(a_ ):
            __A = requests.get(url + F'''&page={i + 2}''' , headers=a_ ).json()
            job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()

    SCREAMING_SNAKE_CASE :Union[str, Any] = get_job_time(args.workflow_run_id)
    SCREAMING_SNAKE_CASE :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
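# Quick sanity check of the per-job duration arithmetic used above; the
# timestamps imitate GitHub's ISO-8601 `started_at`/`completed_at` fields.
import dateutil.parser as date_parser

start = date_parser.parse("2023-05-01T10:00:00Z")
end = date_parser.parse("2023-05-01T10:42:30Z")
assert round((end - start).total_seconds() / 60.0) == 42  # whole minutes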
15
1
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings, add_end_docstrings, add_start_docstrings,
    add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings,
)
from .generic import (
    ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType,
    add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims,
    find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array,
    is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype,
    is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj,
    transpose, working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX,
    TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError,
    PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file,
    default_cache_path, define_sagemaker_information, download_url,
    extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name,
    has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache,
    send_example_telemetry, try_to_load_from_cache,
)
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION,
    USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable,
    _LazyModule, ccl_version, direct_transformers_import, get_torch_version,
    is_accelerate_available, is_apex_available, is_bitsandbytes_available,
    is_bsa_available, is_coloredlogs_available, is_cython_available,
    is_datasets_available, is_decord_available, is_detectrona_available,
    is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook,
    is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available,
    is_keras_nlp_available, is_librosa_available, is_natten_available,
    is_ninja_available, is_onnx_available, is_openai_available,
    is_optimum_available, is_pandas_available, is_peft_available,
    is_phonemizer_available, is_protobuf_available, is_psutil_available,
    is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available,
    is_pytest_available, is_pytorch_quantization_available, is_rjieba_available,
    is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available,
    is_seqio_available, is_sklearn_available, is_soundfile_availble,
    is_spacy_available, is_speech_available, is_sudachi_available,
    is_tensorflow_probability_available, is_tensorflow_text_available,
    is_tfaonnx_available, is_tf_available, is_timm_available,
    is_tokenizers_available, is_torch_available, is_torch_bfaa_available,
    is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available,
    is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available,
    is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available,
    is_torch_tensorrt_fx_available, is_torch_tfaa_available,
    is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available,
    is_torchdynamo_available, is_torchvision_available,
    is_training_run_on_sagemaker, is_vision_available, requires_backends,
    torch_only_method,
)


SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin'
SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json'
SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors'
SCREAMING_SNAKE_CASE :str = 'tf_model.h5'
SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json'
SCREAMING_SNAKE_CASE :str = 'model.ckpt'
SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack'
SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json'
SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors'
SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json'
SCREAMING_SNAKE_CASE :str = 'config.json'
SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json'
SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME
SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json'
SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json'

SCREAMING_SNAKE_CASE :Optional[int] = '▁'
SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

SCREAMING_SNAKE_CASE :str = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.

SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def UpperCAmelCase ( a_ ) -> Dict:
    """simple docstring"""
    if version.parse(a_ ) < version.parse(a_ ):
        if "dev" in min_version:
            __A = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            __A = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
15
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def UpperCAmelCase ( a_ ) -> List[str]:
    """simple docstring"""
    __A = args.pruning_method
    __A = args.threshold
    __A = args.model_name_or_path.rstrip("/" )
    __A = args.target_model_path
    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    __A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) )
    __A = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            __A = tensor
            print(F'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            __A = tensor
            print(F'''Copied layer {name}''' )
        elif "bias" in name:
            __A = tensor
            print(F'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                __A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F'''{prefix_}mask_scores''']
                __A = TopKBinarizer.apply(a_ , a_ )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F'''{prefix_}mask_scores''']
                __A = ThresholdBinarizer.apply(a_ , a_ , a_ )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F'''{prefix_}mask_scores''']
                __A , __A = -0.1, 1.1
                __A = torch.sigmoid(a_ )
                __A = s * (r - l) + l
                __A = s_bar.clamp(min=0.0 , max=1.0 )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            else:
                raise ValueError("Unknown pruning method" )

    if target_model_path is None:
        __A = os.path.join(
            os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}'''
        )

    if not os.path.isdir(a_ ):
        shutil.copytree(a_ , a_ )
        print(F'''\nCreated folder {target_model_path}''' )

    torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) )
    print("\nPruned model saved! See you later!" )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
    parser.add_argument(
        '--pruning_method',
        choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
        type=str,
        required=True,
        help=(
            'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
            ' sigmoied_threshold = Soft movement pruning)'
        ),
    )
    parser.add_argument(
        '--threshold',
        type=float,
        required=False,
        help=(
            'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
            'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
            'Not needed for `l0`'
        ),
    )
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        required=True,
        help='Folder containing the model that was previously fine-pruned',
    )
    parser.add_argument(
        '--target_model_path',
        default=None,
        type=str,
        required=False,
        help='Folder containing the model that was previously fine-pruned',
    )
    SCREAMING_SNAKE_CASE :str = parser.parse_args()

    main(args)
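# The "magnitude" branch above boils down to a boolean keep-mask; a tiny
# self-contained sketch of that idea (a plain threshold on |w|, not a
# reimplementation of the emmental binarizers).
import torch

tensor = torch.tensor([[0.5, -0.05], [0.01, -2.0]])
mask = (tensor.abs() > 0.1).float()  # keep weights whose magnitude beats 0.1
pruned = tensor * mask
assert pruned.tolist() == [[0.5, -0.0], [0.0, -2.0]]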
15
1
import math


def UpperCAmelCase ( a_ , a_ ) -> float:
    """simple docstring"""
    if (
        not isinstance(a_ , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * power_factor


def UpperCAmelCase ( a_ , a_ ) -> float:
    """simple docstring"""
    if (
        not isinstance(a_ , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * math.sqrt(1 - power_factor**2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
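# Minimal usage sketch of the two power computations above, written with
# descriptive names of my own choosing (P = S*cos(phi), Q = S*sin(phi)).
import math

def real_power(apparent_power: float, power_factor: float) -> float:
    return apparent_power * power_factor

def reactive_power(apparent_power: float, power_factor: float) -> float:
    return apparent_power * math.sqrt(1 - power_factor**2)

assert abs(real_power(100, 0.9) - 90.0) < 1e-9
assert abs(reactive_power(100, 0.9) - 43.58898943540674) < 1e-9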
15
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'}

SCREAMING_SNAKE_CASE :Union[str, Any] = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    }
}

SCREAMING_SNAKE_CASE :int = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = ["input_ids", "attention_mask"]
    snake_case_ = []

    def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,):
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token

        __A = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)

        __A = vocab_file
        __A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A )

    @property
    def UpperCamelCase_ ( self : List[str] ):
        return self.sp_model.get_piece_size()

    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Optional[int] ):
        __A = self.__dict__.copy()
        __A = None
        return state

    def __setstate__( self : str ,A : Optional[Any] ):
        __A = d

        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            __A = {}

        __A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCamelCase_ ( self : Any ,A : str ):
        return self.sp_model.encode(A ,out_type=A )

    def UpperCamelCase_ ( self : List[str] ,A : Tuple ):
        return self.sp_model.piece_to_id(A )

    def UpperCamelCase_ ( self : List[Any] ,A : Tuple ):
        __A = self.sp_model.IdToPiece(A )
        return token

    def UpperCamelCase_ ( self : List[Any] ,A : int ):
        __A = []
        __A = ""
        __A = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(A ) + token
                __A = True
                __A = []
            else:
                current_sub_tokens.append(A )
                __A = False
        out_string += self.sp_model.decode(A )
        return out_string.strip()

    def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,):
        __A = kwargs.pop("use_source_tokenizer" ,A )
        __A = self.convert_ids_to_tokens(A ,skip_special_tokens=A )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        __A = []
        __A = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(A ) )
                    __A = []
                sub_texts.append(A )
            else:
                current_sub_text.append(A )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(A ) )

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            __A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) )
        else:
            __A = "".join(A )

        __A = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            __A = self.clean_up_tokenization(A )
            return clean_text
        else:
            return text

    def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ):
        if not os.path.isdir(A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __A = os.path.join(
            A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,A )
        elif not os.path.isfile(self.vocab_file ):
            with open(A ,"wb" ) as fi:
                __A = self.sp_model.serialized_model_proto()
                fi.write(A )
        return (out_vocab_file,)

    def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __A = [self.cls_token_id]
        __A = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A
            )
        if token_ids_a is None:
            return [1] + ([0] * len(A )) + [1]
        return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]

    def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ):
        __A = [self.sep_token_id]
        __A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
15
1
import argparse
import json
import pickle
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging


logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)


def UpperCAmelCase ( a_ ) -> int:
    """simple docstring"""
    __A = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    __A = MaskFormerConfig(backbone_config=a_ )

    __A = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        __A = 8_4_7
        __A = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        __A = 1_5_0
        __A = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        __A = 1_7_1
        __A = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        __A = 1_3_3
        __A = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        __A = 1_9
        __A = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        __A = 6_5
        __A = "mapillary-vistas-id2label.json"

    __A = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) )
    __A = {int(a_ ): v for k, v in idalabel.items()}
    return config


def UpperCAmelCase ( a_ ) -> Any:
    """simple docstring"""
    __A = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        if i < 3:
            rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
        rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
        rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
        # cross-attention out projection
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
        # MLP 1
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
        # MLP 2
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
        # layernorm 3 (final layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
    for i in range(3 ):
        rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
    # fmt: on

    return rename_keys


def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
    """simple docstring"""
    __A = dct.pop(a_ )
    __A = val


def UpperCAmelCase ( a_ , a_ ) -> Dict:
    """simple docstring"""
    __A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        __A = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            __A = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            __A = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            __A = in_proj_weight[:dim, :]
            __A = in_proj_bias[: dim]
            __A = in_proj_weight[dim : dim * 2, :]
            __A = in_proj_bias[dim : dim * 2]
            __A = in_proj_weight[-dim :, :]
            __A = in_proj_bias[-dim :]
            # fmt: on


def UpperCAmelCase ( a_ , a_ ) -> str:
    """simple docstring"""
    __A = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        __A = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        __A = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __A = in_proj_weight[: hidden_size, :]
        __A = in_proj_bias[:config.hidden_size]
        __A = in_proj_weight[hidden_size : hidden_size * 2, :]
        __A = in_proj_bias[hidden_size : hidden_size * 2]
        __A = in_proj_weight[-hidden_size :, :]
        __A = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        __A = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        __A = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __A = in_proj_weight[: hidden_size, :]
        __A = in_proj_bias[:config.hidden_size]
        __A = in_proj_weight[hidden_size : hidden_size * 2, :]
        __A = in_proj_bias[hidden_size : hidden_size * 2]
        __A = in_proj_weight[-hidden_size :, :]
        __A = in_proj_bias[-hidden_size :]
    # fmt: on


def UpperCAmelCase ( ) -> torch.Tensor:
    """simple docstring"""
    __A = "http://images.cocodataset.org/val2017/000000039769.jpg"
    __A = Image.open(requests.get(a_ , stream=a_ ).raw )
    return im


@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_ , a_ = False ) -> Union[str, Any]:
    """simple docstring"""
    __A = get_maskformer_config(a_ )

    # load original state_dict
    with open(a_ , "rb" ) as f:
        __A = pickle.load(a_ )
    __A = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    __A = create_rename_keys(a_ )
    for src, dest in rename_keys:
        rename_key(a_ , a_ , a_ )
    read_in_swin_q_k_v(a_ , config.backbone_config )
    read_in_decoder_q_k_v(a_ , a_ )

    # update to torch tensors
    for key, value in state_dict.items():
        __A = torch.from_numpy(a_ )

    # load 🤗 model
    __A = MaskFormerForInstanceSegmentation(a_ )
    model.eval()

    for name, param in model.named_parameters():
        print(a_ , param.shape )

    __A , __A = model.load_state_dict(a_ , strict=a_ )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(a_ ) == 0, F'''Unexpected keys: {unexpected_keys}'''

    # verify results
    __A = prepare_img()
    if "vistas" in model_name:
        __A = 6_5
    elif "cityscapes" in model_name:
        __A = 6_5_5_3_5
    else:
        __A = 2_5_5
    __A = True if "ade" in model_name else False
    __A = MaskFormerImageProcessor(ignore_index=a_ , reduce_labels=a_ )

    __A = image_processor(a_ , return_tensors="pt" )

    __A = model(**a_ )

    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )

    if model_name == "maskformer-swin-tiny-ade":
        __A = torch.tensor(
            [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=1E-4 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(a_ ).mkdir(exist_ok=a_ )
        model.save_pretrained(a_ )
        image_processor.save_pretrained(a_ )

    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        image_processor.push_to_hub(F'''nielsr/{model_name}''' )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
        type=str,
        help='Path to the original state dict (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    SCREAMING_SNAKE_CASE :Optional[Any] = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
15
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    SCREAMING_SNAKE_CASE :Any = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE :int = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def UpperCAmelCase ( a_ ) -> Optional[Any]:
    """simple docstring"""
    __A = (images / 2 + 0.5).clamp(0 , 1 )
    __A = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    __A = numpy_to_pil(a_ )
    return images


def UpperCAmelCase ( a_ ) -> int:
    """simple docstring"""
    if images.ndim == 3:
        __A = images[None, ...]
    __A = (images * 2_5_5).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        __A = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        __A = [Image.fromarray(a_ ) for image in images]
    return pil_images
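# Minimal usage sketch of the numpy -> PIL half of the helpers above, with the
# original diffusers name `numpy_to_pil` assumed for clarity.
import numpy as np
from PIL import Image

def numpy_to_pil(images: np.ndarray) -> list:
    # (batch, height, width, channels) floats in [0, 1] -> list of PIL images
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        return [Image.fromarray(img.squeeze(), mode="L") for img in images]
    return [Image.fromarray(img) for img in images]

pils = numpy_to_pil(np.random.rand(2, 64, 64, 3))
assert len(pils) == 2 and pils[0].size == (64, 64)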
15
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


SCREAMING_SNAKE_CASE :List[Any] = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE :Dict = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
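# The module above relies on transformers' lazy-import machinery; a minimal
# standalone sketch of the same pattern (my simplification, not the actual
# transformers._LazyModule implementation).
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to its submodule, importing nothing yet
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        # import the submodule only on first attribute access
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)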
15
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE :List[Any] = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    snake_case_ = "yolos"

    def __init__( self : Any ,A : Optional[Any]=7_68 ,A : Dict=12 ,A : Any=12 ,A : str=30_72 ,A : Any="gelu" ,A : str=0.0 ,A : List[str]=0.0 ,A : Dict=0.02 ,A : int=1E-12 ,A : Tuple=[5_12, 8_64] ,A : List[Any]=16 ,A : str=3 ,A : str=True ,A : Any=1_00 ,A : Dict=True ,A : Dict=False ,A : Tuple=1 ,A : Union[str, Any]=5 ,A : Optional[Any]=2 ,A : Union[str, Any]=5 ,A : int=2 ,A : int=0.1 ,**A : List[str] ,):
        super().__init__(**A )

        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = initializer_range
        __A = layer_norm_eps
        __A = image_size
        __A = patch_size
        __A = num_channels
        __A = qkv_bias
        __A = num_detection_tokens
        __A = use_mid_position_embeddings
        __A = auxiliary_loss
        # Hungarian matcher
        __A = class_cost
        __A = bbox_cost
        __A = giou_cost
        # Loss coefficients
        __A = bbox_loss_coefficient
        __A = giou_loss_coefficient
        __A = eos_coefficient


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    snake_case_ = version.parse("1.11" )

    @property
    def UpperCamelCase_ ( self : str ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def UpperCamelCase_ ( self : List[Any] ):
        return 1E-4

    @property
    def UpperCamelCase_ ( self : Optional[Any] ):
        return 12
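# For reference, the un-obfuscated config this row derives from is the public
# YolosConfig; a minimal sketch (requires `transformers`, values are defaults).
from transformers import YolosConfig

config = YolosConfig()
assert config.model_type == "yolos"
assert config.num_detection_tokens == 100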
15
1
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
SCREAMING_SNAKE_CASE :Dict = 0

SCREAMING_SNAKE_CASE :Optional[int] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

SCREAMING_SNAKE_CASE :Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

SCREAMING_SNAKE_CASE :Optional[int] = tuple[int, int]


class UpperCAmelCase :
    '''simple docstring'''

    def __init__( self : Optional[int] ,A : int ,A : int ,A : int ,A : int ,A : int ,A : Node | None ,):
        __A = pos_x
        __A = pos_y
        __A = (pos_y, pos_x)
        __A = goal_x
        __A = goal_y
        __A = g_cost
        __A = parent
        __A = self.calculate_heuristic()
        __A = self.g_cost + self.h_cost

    def UpperCamelCase_ ( self : Optional[int] ):
        __A = self.pos_x - self.goal_x
        __A = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(A ) + abs(A )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self : Union[str, Any] ,A : Node ):
        return self.f_cost < other.f_cost


class UpperCAmelCase :
    '''simple docstring'''

    def __init__( self : Optional[Any] ,A : TPosition ,A : TPosition ):
        __A = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,A )
        __A = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_99_99 ,A )
        __A = [self.start]
        __A = []
        __A = False

    def UpperCamelCase_ ( self : Dict ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            __A = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(A )
            self.closed_nodes.append(A )
            __A = self.get_successors(A )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(A )
                else:
                    # retrieve the best current path
                    __A = self.open_nodes.pop(self.open_nodes.index(A ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(A )
                    else:
                        self.open_nodes.append(A )
        return [self.start.pos]

    def UpperCamelCase_ ( self : List[str] ,A : Node ):
        __A = []
        for action in delta:
            __A = parent.pos_x + action[1]
            __A = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    A ,A ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,A ,)
            )
        return successors

    def UpperCamelCase_ ( self : int ,A : Node | None ):
        __A = node
        __A = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            __A = current_node.parent
        path.reverse()
        return path


class UpperCAmelCase :
    '''simple docstring'''

    def __init__( self : List[str] ,A : TPosition ,A : TPosition ):
        __A = AStar(A ,A )
        __A = AStar(A ,A )
        __A = False

    def UpperCamelCase_ ( self : List[str] ):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            __A = self.fwd_astar.open_nodes.pop(0 )
            __A = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(A ,A )
            self.fwd_astar.closed_nodes.append(A )
            self.bwd_astar.closed_nodes.append(A )
            __A = current_bwd_node
            __A = current_fwd_node
            __A = {
                self.fwd_astar: self.fwd_astar.get_successors(A ),
                self.bwd_astar: self.bwd_astar.get_successors(A ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(A )
                    else:
                        # retrieve the best current path
                        __A = astar.open_nodes.pop(astar.open_nodes.index(A ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(A )
                        else:
                            astar.open_nodes.append(A )
        return [self.fwd_astar.start.pos]

    def UpperCamelCase_ ( self : Dict ,A : Node ,A : Node ):
        __A = self.fwd_astar.retrace_path(A )
        __A = self.bwd_astar.retrace_path(A )
        bwd_path.pop()
        bwd_path.reverse()
        __A = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    SCREAMING_SNAKE_CASE :List[Any] = (0, 0)
    SCREAMING_SNAKE_CASE :int = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    SCREAMING_SNAKE_CASE :Dict = time.time()
    SCREAMING_SNAKE_CASE :Union[str, Any] = AStar(init, goal)
    SCREAMING_SNAKE_CASE :Union[str, Any] = a_star.search()
    SCREAMING_SNAKE_CASE :Optional[int] = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    SCREAMING_SNAKE_CASE :Tuple = time.time()
    SCREAMING_SNAKE_CASE :List[Any] = BidirectionalAStar(init, goal)
    SCREAMING_SNAKE_CASE :str = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
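# Tiny check of the two heuristics the Node class switches between (names mine):
from math import sqrt

dx, dy = 3, 4
manhattan = abs(dx) + abs(dy)    # used when HEURISTIC == 1
euclidean = sqrt(dx**2 + dy**2)  # used otherwise
assert manhattan == 7 and euclidean == 5.0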
15
15
1
import argparse

from tax import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
    """simple docstring"""
    __A = AutoConfig.from_pretrained(a_ )
    __A = FlaxAutoModelForSeqaSeqLM.from_config(config=a_ )
    __A = checkpoints.load_tax_checkpoint(a_ )
    __A = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        __A = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        __A = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __A = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers ):
        __A = F'''layers_{str(a_ )}'''

        # Self-Attention
        __A = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        __A = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        __A = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        __A = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __A = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        __A = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            __A = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __A = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __A = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        __A = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        __A = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        __A = flax_model.params["encoder"]["block"][str(a_ )]["layer"]
        __A = tax_attention_key
        __A = tax_attention_out
        __A = tax_attention_query
        __A = tax_attention_value
        __A = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __A = tax_global_layer_norm

        if split_mlp_wi:
            __A = tax_mlp_wi_a
            __A = tax_mlp_wi_a
        else:
            __A = tax_mlp_wi

        __A = tax_mlp_wo
        __A = tax_mlp_layer_norm
        __A = flax_model_encoder_layer_block

    # Only for layer 0:
    __A = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    __A = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __A = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        __A = tax_encoder_global_rel_embedding

    # Assigning
    __A = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    __A = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers ):
        __A = F'''layers_{str(a_ )}'''

        # Self-Attention
        __A = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        __A = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        __A = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        __A = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        __A = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"]["scale"]

        # Encoder-Decoder-Attention
        __A = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        __A = tax_enc_dec_attention_module["key"]["kernel"]
        __A = tax_enc_dec_attention_module["out"]["kernel"]
        __A = tax_enc_dec_attention_module["query"]["kernel"]
        __A = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        __A = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            __A = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __A = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __A = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        __A = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        __A = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        __A = flax_model.params["decoder"]["block"][str(a_ )]["layer"]
        __A = tax_attention_key
        __A = tax_attention_out
        __A = tax_attention_query
        __A = tax_attention_value
        __A = tax_pre_attention_layer_norm
        __A = tax_enc_dec_attention_key
        __A = tax_enc_dec_attention_out
        __A = tax_enc_dec_attention_query
        __A = tax_enc_dec_attention_value
        __A = tax_cross_layer_norm

        if split_mlp_wi:
            __A = tax_mlp_wi_a
            __A = tax_mlp_wi_a
        else:
            __A = tax_mlp_wi

        __A = tax_mlp_wo
        __A = txa_mlp_layer_norm
        __A = flax_model_decoder_layer_block

    # Decoder Normalization
    __A = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    __A = txa_decoder_norm

    # Only for layer 0:
    __A = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    __A = tax_decoder_rel_embedding

    # Token Embeddings
    __A = tax_model["target"]["token_embedder"]["embedding"]
    __A = txa_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        __A = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(a_ )
    print("T5X Model was successfully converted!" )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    SCREAMING_SNAKE_CASE :Tuple = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
15
def UpperCAmelCase ( a_ ) -> Optional[int]:
    """simple docstring"""
    __A = [0] * len(a_ )
    __A = []
    __A = [1] * len(a_ )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(a_ ) ):
        if indegree[i] == 0:
            queue.append(a_ )

    while queue:
        __A = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                __A = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(a_ )

    print(max(a_ ) )


# Adjacency list of Graph
SCREAMING_SNAKE_CASE :List[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
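# A de-obfuscated sketch of the same algorithm (longest path in a DAG via
# Kahn's topological ordering, counting vertices); all names here are mine.
from collections import deque

def longest_path_length(graph: dict) -> int:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in graph}
    queue = deque(v for v, d in indegree.items() if d == 0)
    while queue:
        v = queue.popleft()
        for t in graph[v]:
            dist[t] = max(dist[t], dist[v] + 1)
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return max(dist.values())

assert longest_path_length({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5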
15
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 @flax_register_to_config class UpperCAmelCase ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 32 snake_case_ = 4 snake_case_ = 4 snake_case_ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") snake_case_ = False snake_case_ = (320, 640, 1280, 1280) snake_case_ = 2 snake_case_ = 8 snake_case_ = None snake_case_ = 1280 snake_case_ = 0.0 snake_case_ = False snake_case_ = jnp.floataa snake_case_ = True snake_case_ = 0 snake_case_ = False def UpperCamelCase_ ( self : Tuple ,A : jax.random.KeyArray ): # init input tensors __A = (1, self.in_channels, self.sample_size, self.sample_size) __A = jnp.zeros(A ,dtype=jnp.floataa ) __A = jnp.ones((1,) ,dtype=jnp.intaa ) __A = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) __A , __A = jax.random.split(A ) __A = {"params": params_rng, "dropout": dropout_rng} return self.init(A ,A ,A ,A )["params"] def UpperCamelCase_ ( self : Dict ): __A = self.block_out_channels __A = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
__A = self.num_attention_heads or self.attention_head_dim # input __A = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time __A = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) __A = FlaxTimestepEmbedding(A ,dtype=self.dtype ) __A = self.only_cross_attention if isinstance(A ,A ): __A = (only_cross_attention,) * len(self.down_block_types ) if isinstance(A ,A ): __A = (num_attention_heads,) * len(self.down_block_types ) # down __A = [] __A = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): __A = output_channel __A = block_out_channels[i] __A = i == len(A ) - 1 if down_block_type == "CrossAttnDownBlock2D": __A = FlaxCrossAttnDownBlockaD( in_channels=A ,out_channels=A ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) else: __A = FlaxDownBlockaD( in_channels=A ,out_channels=A ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(A ) __A = down_blocks # mid __A = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) # up __A = [] __A = list(reversed(A ) ) __A = list(reversed(A ) ) __A = list(reversed(A ) ) __A = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): __A = output_channel __A = reversed_block_out_channels[i] __A = reversed_block_out_channels[min(i + 1 ,len(A ) - 1 )] __A = i == len(A ) - 1 if up_block_type == "CrossAttnUpBlock2D": __A = FlaxCrossAttnUpBlockaD( in_channels=A ,out_channels=A ,prev_output_channel=A ,num_layers=self.layers_per_block + 1 ,num_attention_heads=reversed_num_attention_heads[i] ,add_upsample=not is_final_block ,dropout=self.dropout ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) else: __A = FlaxUpBlockaD( in_channels=A ,out_channels=A ,prev_output_channel=A ,num_layers=self.layers_per_block + 1 ,add_upsample=not is_final_block ,dropout=self.dropout ,dtype=self.dtype ,) up_blocks.append(A ) __A = output_channel __A = up_blocks # out __A = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __A = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : Optional[Any] ,A : Tuple ,A : List[Any] ,A : List[str] ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,A : bool = True ,A : bool = False ,): # 1. time if not isinstance(A ,jnp.ndarray ): __A = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(A ,jnp.ndarray ) and len(timesteps.shape ) == 0: __A = timesteps.astype(dtype=jnp.floataa ) __A = jnp.expand_dims(A ,0 ) __A = self.time_proj(A ) __A = self.time_embedding(A ) # 2. pre-process __A = jnp.transpose(A ,(0, 2, 3, 1) ) __A = self.conv_in(A ) # 3. 
down __A = (sample,) for down_block in self.down_blocks: if isinstance(A ,A ): __A , __A = down_block(A ,A ,A ,deterministic=not train ) else: __A , __A = down_block(A ,A ,deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: __A = () for down_block_res_sample, down_block_additional_residual in zip( A ,A ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) __A = new_down_block_res_samples # 4. mid __A = self.mid_block(A ,A ,A ,deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: __A = down_block_res_samples[-(self.layers_per_block + 1) :] __A = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(A ,A ): __A = up_block( A ,temb=A ,encoder_hidden_states=A ,res_hidden_states_tuple=A ,deterministic=not train ,) else: __A = up_block(A ,temb=A ,res_hidden_states_tuple=A ,deterministic=not train ) # 6. post-process __A = self.conv_norm_out(A ) __A = nn.silu(A ) __A = self.conv_out(A ) __A = jnp.transpose(A ,(0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=A )
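# A minimal standalone sketch of the timestep handling at the top of __call__
# above: a Python scalar is wrapped into a rank-1 jnp array, and a 0-d array is
# cast and broadcast to a batch dimension before the sinusoidal projection.
# The function name and the int32/float32 dtypes are assumptions of this
# sketch (the dump anonymizes the originals).
import jax.numpy as jnp


def prepare_timesteps(timesteps):
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif len(timesteps.shape) == 0:
        timesteps = timesteps.astype(dtype=jnp.float32)
        timesteps = jnp.expand_dims(timesteps, 0)
    return timesteps


print(prepare_timesteps(10).shape)               # (1,)
print(prepare_timesteps(jnp.asarray(10)).shape)  # (1,)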
15
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" __A = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) __A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) __A = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) __A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) __A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) __A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) __A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) __A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) __A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) __A = key.replace("image_encoder.module" , "flava.image_model" ) __A = key.replace("text_encoder.module" , "flava.text_model" ) __A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) __A = key.replace("mm_encoder.module" , "flava.multimodal_model" ) __A = key.replace("text_projection" , "flava.text_projection" ) __A = key.replace("image_projection" , "flava.image_projection" ) __A = value.float() for key, value in codebook_state_dict.items(): __A = value return upgrade @torch.no_grad() def UpperCAmelCase ( a_ , a_ , a_ , a_=None ) -> Tuple: """simple docstring""" if config_path is not None: __A = FlavaConfig.from_pretrained(a_ ) else: __A = FlavaConfig() __A = FlavaForPreTraining(a_ ).eval() __A = convert_dalle_checkpoint(a_ , a_ , save_checkpoint=a_ ) if os.path.exists(a_ ): __A = torch.load(a_ , map_location="cpu" ) else: __A = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" ) __A = upgrade_state_dict(a_ , a_ ) hf_model.load_state_dict(a_ ) __A = hf_model.state_dict() __A = count_parameters(a_ ) __A = count_parameters(a_ ) + count_parameters(a_ ) assert torch.allclose(a_ , a_ , atol=1E-3 ) hf_model.save_pretrained(a_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
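# A small standalone illustration of the key-renaming scheme in
# upgrade_state_dict above. In runnable form the `.replace(...)` calls must be
# chained on the same variable so every rule gets a chance to fire; the sample
# keys below are hypothetical, while the rename rules are taken from the
# script.
def rename_key(key: str) -> str:
    key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
    key = key.replace("image_encoder.module", "flava.image_model")
    key = key.replace("text_encoder.module", "flava.text_model")
    return key


print(rename_key("heads.fairseq_mlm.cls.predictions.bias"))
# -> mlm_head.bias
print(rename_key("image_encoder.module.patch_embed.proj.weight"))
# -> flava.image_model.patch_embed.proj.weight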
15
1
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]: """simple docstring""" return [ int(1_0_0_0 * (box[0] / width) ), int(1_0_0_0 * (box[1] / height) ), int(1_0_0_0 * (box[2] / width) ), int(1_0_0_0 * (box[3] / height) ), ] def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]: """simple docstring""" __A = to_pil_image(a_ ) __A , __A = pil_image.size __A = pytesseract.image_to_data(a_ , lang=a_ , output_type="dict" , config=a_ ) __A , __A , __A , __A , __A = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates __A = [idx for idx, word in enumerate(a_ ) if not word.strip()] __A = [word for idx, word in enumerate(a_ ) if idx not in irrelevant_indices] __A = [coord for idx, coord in enumerate(a_ ) if idx not in irrelevant_indices] __A = [coord for idx, coord in enumerate(a_ ) if idx not in irrelevant_indices] __A = [coord for idx, coord in enumerate(a_ ) if idx not in irrelevant_indices] __A = [coord for idx, coord in enumerate(a_ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __A = [] for x, y, w, h in zip(a_ , a_ , a_ , a_ ): __A = [x, y, x + w, y + h] actual_boxes.append(a_ ) # finally, normalize the bounding boxes __A = [] for box in actual_boxes: normalized_boxes.append(normalize_box(a_ , a_ , a_ ) ) assert len(a_ ) == len(a_ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = ["pixel_values"] def __init__( self : Union[str, Any] ,A : bool = True ,A : Dict[str, int] = None ,A : PILImageResampling = PILImageResampling.BILINEAR ,A : bool = True ,A : float = 1 / 2_55 ,A : bool = True ,A : Union[float, Iterable[float]] = None ,A : Union[float, Iterable[float]] = None ,A : bool = True ,A : Optional[str] = None ,A : Optional[str] = "" ,**A : Optional[Any] ,): super().__init__(**A ) __A = size if size is not None else {"height": 2_24, "width": 2_24} __A = get_size_dict(A ) __A = do_resize __A = size __A = resample __A = do_rescale __A = rescale_value __A = do_normalize __A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __A = image_std if image_std is not None else IMAGENET_STANDARD_STD __A = apply_ocr __A = ocr_lang __A = tesseract_config def UpperCamelCase_ ( self : Any ,A : np.ndarray ,A : Dict[str, int] ,A : PILImageResampling = PILImageResampling.BILINEAR ,A : Optional[Union[str, ChannelDimension]] = None ,**A : List[str] ,): __A = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) __A = (size["height"], size["width"]) return resize(A ,size=A ,resample=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Tuple ,A : np.ndarray ,A : Union[int, float] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : List[Any] ,): return rescale(A ,scale=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Optional[Any] ,A : np.ndarray ,A : Union[float, Iterable[float]] ,A : Union[float, Iterable[float]] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : List[Any] ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Any ,A : ImageInput ,A : bool = None ,A : Dict[str, int] = None ,A : List[str]=None ,A : bool = None ,A : float = None ,A : bool = None ,A : Union[float, Iterable[float]] = None ,A : Union[float, Iterable[float]] = None ,A : bool = None ,A : Optional[str] = None ,A : Optional[str] = None ,A : Optional[Union[str, TensorType]] = None ,A : ChannelDimension = ChannelDimension.FIRST ,**A : int ,): __A = do_resize if do_resize is not None else self.do_resize __A = size if size is not None else self.size __A = get_size_dict(A ) __A = resample if resample is not None else self.resample __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = do_normalize if do_normalize is not None else self.do_normalize __A = image_mean if image_mean is not None else self.image_mean __A = image_std if image_std is not None else self.image_std __A = apply_ocr if apply_ocr is not None else self.apply_ocr __A = ocr_lang if ocr_lang is not None else self.ocr_lang __A = tesseract_config if tesseract_config is not None else self.tesseract_config __A = make_list_of_images(A ) if not valid_images(A ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("If do_normalize is True, image_mean and image_std must be specified." ) # All transformations expect numpy arrays. __A = [to_numpy_array(A ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self ,"pytesseract" ) __A = [] __A = [] for image in images: __A , __A = apply_tesseract(A ,A ,A ) words_batch.append(A ) boxes_batch.append(A ) if do_resize: __A = [self.resize(image=A ,size=A ,resample=A ) for image in images] if do_rescale: __A = [self.rescale(image=A ,scale=A ) for image in images] if do_normalize: __A = [self.normalize(image=A ,mean=A ,std=A ) for image in images] __A = [to_channel_dimension_format(A ,A ) for image in images] __A = BatchFeature(data={"pixel_values": images} ,tensor_type=A ) if apply_ocr: __A = words_batch __A = boxes_batch return data
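# Worked example of the box normalization in normalize_box above: pixel
# coordinates are rescaled onto the fixed 0-1000 grid that LayoutLM-style
# models expect. The page size and box are made up.
width, height = 640, 480
box = [64, 48, 320, 240]  # (left, top, right, bottom) in pixels
normalized = [
    int(1000 * (box[0] / width)),   # 100
    int(1000 * (box[1] / height)),  # 100
    int(1000 * (box[2] / width)),   # 500
    int(1000 * (box[3] / height)),  # 500
]
print(normalized)  # [100, 100, 500, 500]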
15
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} SCREAMING_SNAKE_CASE :Tuple = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } SCREAMING_SNAKE_CASE :List[Any] = { 'camembert-base': 512, } SCREAMING_SNAKE_CASE :List[str] = '▁' class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,): # Mask token behave like a normal word, i.e. include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) __A = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __A = len(self.fairseq_tokens_to_ids ) __A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self : Dict ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self : int ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A ) == 0: # Convert sentence piece unk token to fairseq unk token index 
return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : Tuple ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def __getstate__( self : Dict ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Union[str, Any] ,A : Any ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
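# A minimal sketch of the fairseq id remapping in the tokenizer above, with the
# SentencePiece model stubbed out by a plain dict (the piece id and the "▁le"
# token are invented). CamemBERT reserves ids 0-3 for "<s>NOTUSED", "<pad>",
# "</s>NOTUSED" and "<unk>", and shifts every real SentencePiece id up by the
# size of that table.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4
sp_piece_to_id = {"▁le": 42}  # stand-in for sp_model.PieceToId


def convert_token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    if sp_piece_to_id.get(token, 0) == 0:
        # unknown SentencePiece piece -> fairseq unk id
        return fairseq_tokens_to_ids["<unk>"]
    return fairseq_offset + sp_piece_to_id[token]


print(convert_token_to_id("<pad>"))  # 1
print(convert_token_to_id("▁le"))    # 46
print(convert_token_to_id("xyzzy"))  # 3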
15
1
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :str = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "time_series_transformer" snake_case_ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : Tuple ,A : Optional[int] = None ,A : Optional[int] = None ,A : str = "student_t" ,A : str = "nll" ,A : int = 1 ,A : List[int] = [1, 2, 3, 4, 5, 6, 7] ,A : Optional[Union[str, bool]] = "mean" ,A : int = 0 ,A : int = 0 ,A : int = 0 ,A : int = 0 ,A : Optional[List[int]] = None ,A : Optional[List[int]] = None ,A : int = 32 ,A : int = 32 ,A : int = 2 ,A : int = 2 ,A : int = 2 ,A : int = 2 ,A : bool = True ,A : str = "gelu" ,A : int = 64 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : int = 1_00 ,A : float = 0.02 ,A : Union[str, Any]=True ,**A : Optional[int] ,): # time series specific configuration __A = prediction_length __A = context_length or prediction_length __A = distribution_output __A = loss __A = input_size __A = num_time_features __A = lags_sequence __A = scaling __A = num_dynamic_real_features __A = num_static_real_features __A = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(A ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) __A = cardinality else: __A = [0] if embedding_dimension and num_static_categorical_features > 0: if len(A ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) __A = embedding_dimension else: __A = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality] __A = num_parallel_samples # Transformer architecture configuration __A = input_size * len(A ) + self._number_of_features __A = d_model __A = encoder_attention_heads __A = decoder_attention_heads __A = encoder_ffn_dim __A = decoder_ffn_dim __A = encoder_layers __A = decoder_layers __A = dropout __A = attention_dropout __A = activation_dropout __A = encoder_layerdrop __A = decoder_layerdrop __A = activation_function __A = init_std __A = use_cache super().__init__(is_encoder_decoder=A ,**A ) @property def UpperCamelCase_ ( self : Optional[Any] ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
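# Standalone check of the default embedding-dimension rule in the config above:
# when `embedding_dimension` is not given, each static categorical feature of
# cardinality `cat` gets an embedding of size min(50, (cat + 1) // 2). The
# cardinalities below are invented for illustration.
cardinality = [3, 10, 366]
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]
print(embedding_dimension)  # [2, 5, 50]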
15
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
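# Sanity check for heaps() above: Heap's algorithm must emit exactly n!
# permutations, all distinct. The sample list is arbitrary.
from math import factorial

sample = [1, 2, 3, 4]
perms = heaps(sample)
assert len(perms) == factorial(4)     # 24 permutations
assert len(set(perms)) == len(perms)  # no duplicates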
15
1
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE :Optional[int] = random.Random() def UpperCAmelCase ( a_ , a_=1.0 , a_=None , a_=None ) -> int: """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any ,A : Optional[int] ,A : List[Any]=7 ,A : List[Any]=4_00 ,A : List[Any]=20_00 ,A : Union[str, Any]=20_48 ,A : int=1_28 ,A : Optional[Any]=1 ,A : int=5_12 ,A : Any=30 ,A : List[str]=4_41_00 ,): __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = spectrogram_length __A = feature_size __A = num_audio_channels __A = hop_length __A = chunk_length __A = sampling_rate def UpperCamelCase_ ( self : Any ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def UpperCamelCase_ ( self : Optional[Any] ,A : Any=False ,A : str=False ): def _flatten(A : Optional[Any] ): return list(itertools.chain(*A ) ) if equal_length: __A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __A = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: __A = [np.asarray(A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = TvltFeatureExtractor def UpperCamelCase_ ( self : Tuple ): __A = TvltFeatureExtractionTester(self ) def UpperCamelCase_ ( self : List[Any] ): __A = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(A ,"spectrogram_length" ) ) self.assertTrue(hasattr(A ,"feature_size" ) ) self.assertTrue(hasattr(A ,"num_audio_channels" ) ) self.assertTrue(hasattr(A ,"hop_length" ) ) self.assertTrue(hasattr(A ,"chunk_length" ) ) self.assertTrue(hasattr(A ,"sampling_rate" ) ) def UpperCamelCase_ ( self : List[Any] ): __A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __A = feat_extract_first.save_pretrained(A )[0] check_json_file_has_correct_format(A ) __A = self.feature_extraction_class.from_pretrained(A ) __A = feat_extract_first.to_dict() __A = feat_extract_second.to_dict() __A = dict_first.pop("mel_filters" ) __A = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(A ,A ) ) self.assertEqual(A ,A ) def UpperCamelCase_ ( self : List[str] ): __A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __A = os.path.join(A ,"feat_extract.json" ) 
feat_extract_first.to_json_file(A ) __A = self.feature_extraction_class.from_json_file(A ) __A = feat_extract_first.to_dict() __A = feat_extract_second.to_dict() __A = dict_first.pop("mel_filters" ) __A = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(A ,A ) ) self.assertEqual(A ,A ) def UpperCamelCase_ ( self : Optional[int] ): # Initialize feature_extractor __A = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )] __A = [np.asarray(A ) for speech_input in speech_inputs] # Test not batched input __A = feature_extractor(np_speech_inputs[0] ,return_tensors="np" ,sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched __A = feature_extractor(A ,return_tensors="np" ,sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking __A = feature_extractor( A ,return_tensors="np" ,sampling_rate=4_41_00 ,mask_audio=A ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. __A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(A ) __A = feature_extractor(A ,return_tensors="np" ,sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def UpperCamelCase_ ( self : Optional[int] ,A : Any ): __A = load_dataset("hf-internal-testing/librispeech_asr_dummy" ,"clean" ,split="validation" ) # automatic decoding with librispeech __A = ds.sort("id" ).select(range(A ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCamelCase_ ( self : List[str] ): __A = self._load_datasamples(1 ) __A = TvltFeatureExtractor() __A = feature_extractor(A ,return_tensors="pt" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 1_92, 1_28) ) __A = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,A ,atol=1E-4 ) )
15
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
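# Quick check of gnome_sort above against the built-in sorted(); the input list
# is arbitrary.
data = [5, 3, 8, 1, 2]
assert gnome_sort(data[:]) == sorted(data)
print(gnome_sort([3, 1, 2]))  # [1, 2, 3]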
15
1
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if pattern occurs in text, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Exercise rabin_karp on a handful of positive and negative cases."""
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
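# Worked example of the rolling-hash update inside rabin_karp above: the hash
# of "bcd" is derived from the hash of "abc" in O(1) by dropping the leading
# character and appending the next one, using the same alphabet_size/modulus
# constants. The strings are made up.
text = "abcd"
p_len = 3
h = 0
power = 1
for i in range(p_len):
    h = (ord(text[i]) + h * alphabet_size) % modulus
    if i < p_len - 1:
        power = (power * alphabet_size) % modulus

rolled = ((h - ord(text[0]) * power) * alphabet_size + ord(text[p_len])) % modulus
direct = 0
for ch in "bcd":
    direct = (ord(ch) + direct * alphabet_size) % modulus
assert rolled == direct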
15
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = None class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 2 @register_to_config def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,): # standard deviation of the initial noise distribution __A = sigma_max # setable values __A = None __A = None __A = None # sigma(t_i) def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ): return sample def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ): __A = num_inference_steps __A = np.arange(0 ,self.num_inference_steps )[::-1].copy() __A = torch.from_numpy(A ).to(A ) __A = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __A = torch.tensor(A ,dtype=torch.floataa ,device=A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ): if self.config.s_min <= sigma <= self.config.s_max: __A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 ) else: __A = 0 # sample eps ~ N(0, S_noise^2 * I) __A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device ) __A = sigma + gamma * sigma __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_hat + sigma_hat * model_output __A = (sample_hat - pred_original_sample) / sigma_hat __A = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_prev + sigma_prev * model_output __A = (sample_prev - pred_original_sample) / sigma_prev __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ): raise NotImplementedError()
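# A standalone numpy sketch of the noise schedule built in set_timesteps above:
# a geometric interpolation between sigma_max**2 and sigma_min**2. The defaults
# from the config (sigma_min=0.02, sigma_max=100) and a step count of 5 are
# assumed here; in the scheduler itself the list is indexed by the reversed
# timestep grid.
import numpy as np

sigma_min, sigma_max = 0.02, 100.0
num_inference_steps = 5
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
    for i in range(num_inference_steps)
]
print(np.round(schedule, 6))
# decreases from sigma_max**2 = 10000.0 down to sigma_min**2 = 0.0004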
15
1
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class UpperCAmelCase : '''simple docstring''' @staticmethod def UpperCamelCase_ ( *A : Any ,**A : int ): pass def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. SCREAMING_SNAKE_CASE :List[Any] = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' snake_case_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def UpperCamelCase_ ( self : int ,A : Optional[int] ,A : List[str] ,A : Union[str, Any] ): __A = pipeline( "document-question-answering" ,model=A ,tokenizer=A ,image_processor=A ) __A = INVOICE_URL __A = list(zip(*apply_tesseract(load_image(A ) ,A ,"" ) ) ) __A = "What is the placebo?" __A = [ { "image": load_image(A ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def UpperCamelCase_ ( self : Optional[int] ,A : Any ,A : List[str] ): __A = dqa_pipeline(A ,top_k=2 ) self.assertEqual( A ,[ [ {"score": ANY(A ), "answer": ANY(A ), "start": ANY(A ), "end": ANY(A )}, {"score": ANY(A ), "answer": ANY(A ), "start": ANY(A ), "end": ANY(A )}, ] ] * 3 ,) @require_torch @require_detectrona @require_pytesseract def UpperCamelCase_ ( self : str ): __A = pipeline("document-question-answering" ,model="hf-internal-testing/tiny-random-layoutlmv2" ) __A = INVOICE_URL __A = "How many cats are there?" __A = [ {"score": 0.00_01, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.00_01, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] __A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual(nested_simplify(A ,decimals=4 ) ,A ) __A = dqa_pipeline({"image": image, "question": question} ,top_k=2 ) self.assertEqual(nested_simplify(A ,decimals=4 ) ,A ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. # Empty answer probably __A = "./tests/fixtures/tests_samples/COCO/000000039769.png" __A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual(A ,[] ) # We can optionnally pass directly the words and bounding boxes __A = "./tests/fixtures/tests_samples/COCO/000000039769.png" __A = [] __A = [] __A = dqa_pipeline(image=A ,question=A ,words=A ,boxes=A ,top_k=2 ) self.assertEqual(A ,[] ) @slow @require_torch @require_detectrona @require_pytesseract def UpperCamelCase_ ( self : int ): __A = pipeline( "document-question-answering" ,model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" ,revision="9977165" ,) __A = INVOICE_URL __A = "What is the invoice number?" 
__A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ] ,) __A = dqa_pipeline({"image": image, "question": question} ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ] ,) __A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ [ {"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 ,) @slow @require_torch @require_detectrona @require_pytesseract def UpperCamelCase_ ( self : Tuple ): __A = pipeline( "document-question-answering" ,model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" ,revision="9977165" ,max_seq_len=50 ,) __A = INVOICE_URL __A = "What is the invoice number?" __A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] ,) __A = dqa_pipeline({"image": image, "question": question} ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] ,) __A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ [ {"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 ,) @slow @require_torch @require_pytesseract @require_vision def UpperCamelCase_ ( self : Optional[int] ): __A = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" ,revision="3dc6de3" ,add_prefix_space=A ) __A = pipeline( "document-question-answering" ,model="impira/layoutlm-document-qa" ,tokenizer=A ,revision="3dc6de3" ,) __A = INVOICE_URL __A = "What is the invoice number?" 
__A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] ,) __A = dqa_pipeline({"image": image, "question": question} ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] ,) __A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ [ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 ,) __A = list(zip(*apply_tesseract(load_image(A ) ,A ,"" ) ) ) # This model should also work if `image` is set to None __A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23}, ] ,) @slow @require_torch @require_pytesseract @require_vision def UpperCamelCase_ ( self : List[Any] ): __A = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" ,revision="3dc6de3" ,add_prefix_space=A ) __A = pipeline( "document-question-answering" ,model="impira/layoutlm-document-qa" ,tokenizer=A ,revision="3dc6de3" ,max_seq_len=50 ,) __A = INVOICE_URL __A = "What is the invoice number?" __A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] ,) __A = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ [ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 ,) __A = list(zip(*apply_tesseract(load_image(A ) ,A ,"" ) ) ) # This model should also work if `image` is set to None __A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} ,top_k=2 ) self.assertEqual( nested_simplify(A ,decimals=4 ) ,[ {"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16}, ] ,) @slow @require_torch def UpperCamelCase_ ( self : Union[str, Any] ): __A = pipeline( "document-question-answering" ,model="naver-clova-ix/donut-base-finetuned-docvqa" ,tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) ,feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" ,) __A = INVOICE_URL __A = "What is the invoice number?" __A = dqa_pipeline(image=A ,question=A ,top_k=2 ) self.assertEqual(nested_simplify(A ,decimals=4 ) ,[{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def UpperCamelCase_ ( self : Optional[int] ): pass
15
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__) class UpperCAmelCase : '''simple docstring''' snake_case_ = "dummy_data" snake_case_ = "datasets" snake_case_ = False def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,): __A = 0 __A = dataset_name __A = cache_dir __A = use_local_dummy_data __A = config # download_callbacks take a single url as input __A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __A = str(A ) # to be downloaded __A = None __A = None @property def UpperCamelCase_ ( self : Union[str, Any] ): if self._dummy_file is None: __A = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase_ ( self : Optional[Any] ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def UpperCamelCase_ ( self : List[Any] ): return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def UpperCamelCase_ ( self : Tuple ): __A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __A = cached_path( A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A ) return os.path.join(A ,self.dummy_file_name ) @property def UpperCamelCase_ ( self : str ): return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCamelCase_ ( self : Any ): if self._bucket_url is None: __A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def UpperCamelCase_ ( self : Tuple ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ): if self.load_existing_dummy_data: # dummy data is downloaded and tested __A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __A = self.dummy_file_name # special case when data_url is a dict if isinstance(A ,A ): return self.create_dummy_data_dict(A ,A ) elif isinstance(A ,(list, tuple) ): return self.create_dummy_data_list(A ,A ) else: return self.create_dummy_data_single(A ,A ) def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ): return path def UpperCamelCase_ ( self : str ): return {} def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ): __A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(A ,A ): for single_url in single_urls: download_callback(A ) else: __A = single_urls download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(A ,A ): __A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls] else: __A = single_urls __A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) __A = value # make sure that values are unique if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ): __A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url ) __A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __A = [data_url[0]] * len(A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(A ) return dummy_data_list def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ): for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def UpperCamelCase_ ( self : int ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ): def _iter_archive_members(A : Optional[Any] ): # this preserves the order of the members inside the ZIP archive __A = Path(self.dummy_file ).parent __A = path.relative_to(A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(A ) __A = Path(A ) __A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(A ).as_posix(), file_path.open("rb" ) def UpperCamelCase_ ( self : List[Any] ,A : Any ): if not isinstance(A ,A ): __A = [paths] for path in paths: if os.path.isfile(A ): if os.path.basename(A ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(A ): if os.path.basename(A ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(A ): if filename.startswith((".", "__") ): continue yield os.path.join(A ,A )
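# Small illustration of the filename mangling used throughout the dummy-data
# helpers above: the last path segment of a URL is percent-encoded with
# urllib.parse.quote_plus so it is safe to use as a local file name. The URL is
# made up.
import urllib.parse

url = "https://example.com/data/train.csv?rev=2"
print(urllib.parse.quote_plus(url.split("/")[-1]))  # train.csv%3Frev%3D2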
15
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
15
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
1
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return EnvironmentCommand() def UpperCAmelCase ( a_ ) -> Union[str, Any]: """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @staticmethod def UpperCamelCase_ ( A : ArgumentParser ): __A = parser.add_parser("env" ) download_parser.set_defaults(func=A ) download_parser.add_argument( "--accelerate-config_file" ,default=A ,help="The accelerate config file to use for the default values in the launching script." ,) download_parser.set_defaults(func=A ) def __init__( self : str ,A : str ,*A : List[Any] ): __A = accelerate_config_file def UpperCamelCase_ ( self : Optional[int] ): __A = "not installed" if is_safetensors_available(): import safetensors __A = safetensors.__version__ elif importlib.util.find_spec("safetensors" ) is not None: import safetensors __A = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __A = "not installed" __A = __A = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __A = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(A ): __A = load_config_from_file(self._accelerate_config_file ).to_dict() __A = ( "\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(A ,A ) else f'''\t{accelerate_config}''' ) __A = "not installed" __A = "NA" if is_torch_available(): import torch __A = torch.__version__ __A = torch.cuda.is_available() __A = "not installed" __A = "NA" if is_tf_available(): import tensorflow as tf __A = tf.__version__ try: # deprecated in v2.1 __A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __A = bool(tf.config.list_physical_devices("GPU" ) ) __A = "not installed" __A = "not installed" __A = "not installed" __A = "NA" if is_flax_available(): import flax import jax import jaxlib __A = flax.__version__ __A = jax.__version__ __A = jaxlib.__version__ __A = jax.lib.xla_bridge.get_backend().platform __A = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": f'''{safetensors_version}''', "Accelerate version": f'''{accelerate_version}''', "Accelerate config": f'''{accelerate_config_str}''', "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''', "Tensorflow version (GPU?)": f'''{tf_version} ({tf_cuda_available})''', "Flax version (CPU?/GPU?/TPU?)": f'''{flax_version} ({jax_backend})''', "Jax version": f'''{jax_version}''', "JaxLib version": f'''{jaxlib_version}''', "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(A ) ) return info @staticmethod def UpperCamelCase_ ( A : Any ): return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
15
from typing import Dict, Optional import numpy as np import datasets SCREAMING_SNAKE_CASE :List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' SCREAMING_SNAKE_CASE :List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' SCREAMING_SNAKE_CASE :str = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Tuple: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): __A = new_id # turn into Numpy arrays __A = np.array(a_ ) __A = np.array(a_ ) if reduce_labels: __A = 2_5_5 __A = label - 1 __A = 2_5_5 __A = label != ignore_index __A = np.not_equal(a_ , a_ ) __A = pred_label[mask] __A = np.array(a_ )[mask] __A = pred_label[pred_label == label] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Union[str, Any]: """simple docstring""" __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a_ , a_ ): __A , __A , __A , __A = intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = None , a_ = False , ) -> str: """simple docstring""" __A , __A , __A , __A = total_intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) # compute metrics __A = {} __A = total_area_intersect.sum() / total_area_label.sum() __A = total_area_intersect / total_area_union __A = total_area_intersect / total_area_label __A = np.nanmean(a_ ) __A = np.nanmean(a_ ) __A = all_acc __A = iou __A = acc if nan_to_num is not None: __A = {metric: np.nan_to_num(a_ , nan=a_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) ,reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : bool ,A : Optional[int] = None ,A : Optional[Dict[int, int]] = None ,A : bool = False ,): __A = mean_iou( results=A ,gt_seg_maps=A ,num_labels=A ,ignore_index=A ,nan_to_num=A ,label_map=A ,reduce_labels=A ,) return iou_result
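To make the intersect/union bookkeeping concrete, here is the same histogram arithmetic on a hand-checkable 2x2 map with two classes, using only numpy (no datasets dependency).

import numpy as np

pred = np.array([[0, 1], [1, 1]])
label = np.array([[0, 1], [0, 1]])
num_labels = 2

# Pixels where prediction and ground truth agree, binned per class.
intersect = pred[pred == label]
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred + area_label - area_intersect

iou = area_intersect / area_union
print(iou, np.nanmean(iou))  # per-category IoU [0.5, 0.6667] -> mean IoU ~0.5833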
15
1
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[str] = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE :Optional[Any] = { 'AI-Sweden/gpt-sw3-126m': 2048, 'AI-Sweden/gpt-sw3-350m': 2048, 'AI-Sweden/gpt-sw3-1.6b': 2048, 'AI-Sweden/gpt-sw3-6.7b': 2048, 'AI-Sweden/gpt-sw3-20b': 2048, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,): __A = {} if sp_model_kwargs is None else sp_model_kwargs __A = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) __A = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __A = "<|endoftext|>" if eos_token is None else eos_token __A = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __A = unk_token if pad_token is None else pad_token __A = eos_token if bos_token is None else bos_token else: __A = "<pad>" if pad_token is None else pad_token __A = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = do_lower_case __A = remove_space __A = keep_accents __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) # Used for whitespace normalization in input texts # fmt : off __A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __A = re.compile( f'''[{''.join(map(A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]''' ) def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Optional[Any] ,A : Union[str, Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self : List[str] ): return len(self.sp_model ) def UpperCamelCase_ ( self : int ,A : str ): __A = self.non_printing_characters_re.sub("" ,A ) # Normalize whitespaces __A = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization __A = unicodedata.normalize("NFC" ,A ) return text def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ): __A = self.preprocess_text(A ) return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : int ): return self.sp_model.IdToPiece(A ) @staticmethod def UpperCamelCase_ ( A : str ): return out_string def UpperCamelCase_ ( self : str ,A : List[str] ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string def UpperCamelCase_ ( self : str ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ): if isinstance(A ,A ): __A = self.preprocess_text(A ) __A = self.sp_model.encode(A ) else: __A = [self.preprocess_text(A ) for t in text] __A = self.sp_model.encode(A ) if return_tensors is True or return_tensors == "pt": __A = torch.tensor(A ) return token_ids def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ): return self.sp_model.decode(A ) def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ): __A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __A = ( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=A )
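The preprocessing steps above (strip non-printing characters, collapse exotic whitespace to plain spaces, NFC-normalize) can be exercised in isolation. This sketch mirrors the character ranges of the regex but uses a hand-picked subset of the whitespace set, so it is illustrative rather than exhaustive.

import re
import unicodedata

# Control characters, NBSP, soft hyphen and zero-width space, as in the ranges above.
non_printing = re.compile(
    "[%s]" % "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))
)
# Illustrative subset of exotic whitespace characters to collapse into plain spaces.
whitespaces = {"\u2002", "\u2009", "\u202f", "\u3000"}

def preprocess(text: str) -> str:
    text = non_printing.sub("", text)
    text = "".join(" " if ch in whitespaces else ch for ch in text)
    return unicodedata.normalize("NFC", text)

print(preprocess("hello\u2009world\u200b"))  # thin space kept as ' ', zero-width space dropped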
15
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' snake_case_ = StableDiffusionLDMaDPipeline snake_case_ = TEXT_TO_IMAGE_PARAMS snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self : Optional[int] ): torch.manual_seed(0 ) __A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) __A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="scaled_linear" ,clip_sample=A ,set_alpha_to_one=A ,) torch.manual_seed(0 ) __A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) torch.manual_seed(0 ) __A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) __A = CLIPTextModel(A ) __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCamelCase_ ( self : Optional[Any] ,A : Any ,A : List[Any]=0 ): if str(A ).startswith("mps" ): __A = torch.manual_seed(A ) else: __A = torch.Generator(device=A ).manual_seed(A ) __A = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self : List[str] ): __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = StableDiffusionLDMaDPipeline(**A ) __A = ldmad_pipe.to(A ) ldmad_pipe.set_progress_bar_config(disable=A ) __A = self.get_dummy_inputs(A ) __A = ldmad_pipe(**A ) __A , __A = output.rgb, output.depth __A = rgb[0, -3:, -3:, -1] __A = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) __A = np.array( [0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] ) __A = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def UpperCamelCase_ ( self : Dict ): __A = self.get_dummy_components() __A = StableDiffusionLDMaDPipeline(**A ) __A = ldmad_pipe.to(A ) ldmad_pipe.set_progress_bar_config(disable=A ) __A = self.get_dummy_inputs(A ) __A = 3 * [inputs["prompt"]] # forward __A = ldmad_pipe(**A ) __A , __A = output.rgb, output.depth __A = rgb_slice_a[0, -3:, -3:, -1] __A = depth_slice_a[0, -3:, -1] __A = 
self.get_dummy_inputs(A ) __A = 3 * [inputs.pop("prompt" )] __A = ldmad_pipe.tokenizer( A ,padding="max_length" ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=A ,return_tensors="pt" ,) __A = text_inputs["input_ids"].to(A ) __A = ldmad_pipe.text_encoder(A )[0] __A = prompt_embeds # forward __A = ldmad_pipe(**A ) __A , __A = output.rgb, output.depth __A = rgb_slice_a[0, -3:, -3:, -1] __A = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def UpperCamelCase_ ( self : Optional[int] ): __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = PNDMScheduler(skip_prk_steps=A ) __A = StableDiffusionLDMaDPipeline(**A ) __A = ldmad_pipe.to(A ) ldmad_pipe.set_progress_bar_config(disable=A ) __A = self.get_dummy_inputs(A ) __A = "french fries" __A = ldmad_pipe(**A ,negative_prompt=A ) __A , __A = output.rgb, output.depth __A = rgb[0, -3:, -3:, -1] __A = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) __A = np.array( [0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] ) __A = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Tuple ,A : Dict ,A : str="cpu" ,A : Dict=torch.floataa ,A : Optional[Any]=0 ): __A = torch.Generator(device=A ).manual_seed(A ) __A = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) __A = torch.from_numpy(A ).to(device=A ,dtype=A ) __A = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self : List[str] ): __A = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ) __A = ldmad_pipe.to(A ) ldmad_pipe.set_progress_bar_config(disable=A ) __A = self.get_inputs(A ) __A = ldmad_pipe(**A ) __A , __A = output.rgb, output.depth __A = rgb[0, -3:, -3:, -1].flatten() __A = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12) __A = np.array( [0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] ) __A = np.array( [0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3 @nightly @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Union[str, Any] ,A : Any ,A : Optional[int]="cpu" ,A : List[str]=torch.floataa ,A : Optional[int]=0 ): __A = torch.Generator(device=A ).manual_seed(A ) __A = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) ) __A = torch.from_numpy(A ).to(device=A ,dtype=A ) __A = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, 
"generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self : Optional[int] ): __A = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(A ) ldmad_pipe.set_progress_bar_config(disable=A ) __A = self.get_inputs(A ) __A = ldmad_pipe(**A ) __A , __A = output.rgb, output.depth __A = 0.49_55_86 __A = 0.33_79_55_15 __A = 1_12.4_85_18 __A = 98.48_97_46 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3 def UpperCamelCase_ ( self : Optional[Any] ): __A = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(A ) ldmad_pipe.set_progress_bar_config(disable=A ) __A = self.get_inputs(A ) __A = ldmad_pipe(**A ) __A , __A = output.rgb, output.depth __A = 0.4_19_41_27 __A = 0.35_37_55_86 __A = 0.5_63_85_02 __A = 0.34_68_61_03 assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3
15
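The numerical checks in these pipeline tests all follow one pattern: flatten a small corner slice of the output and compare it against hard-coded expectations within a tolerance. A standalone version of that helper (array shapes are assumed, not taken from the pipeline):

import numpy as np

def assert_slice_close(image: np.ndarray, expected: np.ndarray, atol: float = 1e-2) -> None:
    # image has shape (batch, height, width, channels); inspect a 3x3 corner of sample 0.
    corner = image[0, -3:, -3:, -1].flatten()
    diff = np.abs(corner - expected).max()
    assert diff < atol, f"max abs diff {diff} exceeds {atol}"

rgb = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)
assert_slice_close(rgb, np.full(9, 0.5, dtype=np.float32))  # passes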
1
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Start from all vertices with no incoming edges.
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    # Kahn-style traversal, relaxing the longest known distance to each vertex.
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))

# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
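As a quick sanity check of the Kahn-style relaxation above, a three-vertex DAG with a shortcut edge still yields the longest chain:

# 0 -> 1 -> 2 plus the shortcut 0 -> 2: the longest path visits 3 vertices,
# and distances count the start vertex as 1, so this prints 3.
small_dag = {0: [1, 2], 1: [2], 2: []}
longest_distance(small_dag)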
15
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE :str = 'RegNetConfig' # Base docstring SCREAMING_SNAKE_CASE :List[str] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE :Optional[int] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Any = 'tabby, tabby cat' SCREAMING_SNAKE_CASE :Optional[int] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,): super().__init__(**A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) __A = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : List[Any] ,A : Any ): __A = self.convolution(self.padding(A ) ) __A = self.normalization(A ) __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : RegNetConfig ,**A : str ): super().__init__(**A ) __A = config.num_channels __A = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,) def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ): __A = shape_list(A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __A = tf.transpose(A ,perm=(0, 2, 3, 1) ) __A = self.embedder(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ): super().__init__(**A ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" ) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ): return self.normalization(self.convolution(A ) ,training=A ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict ,A : int ,A : int ,**A : str ): super().__init__(**A ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) __A = [ tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ), tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ), ] def UpperCamelCase_ ( self : Dict ,A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __A = self.pooler(A ) for layer_module in self.attention: __A = layer_module(A ) __A = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : int ,A : Optional[int] ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) __A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict ,A : Any ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ): super().__init__(**A ) __A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __A = [ # downsampling is done in the first layer with stride of 2 layer(A ,A ,A ,stride=A ,name="layers.0" ), *[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Any ,A : List[str] ): for layer_module in self.layers: __A = layer_module(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ): super().__init__(**A ) __A = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) ) __A = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) ) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ): __A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __A = hidden_states + (hidden_state,) __A = stage_module(A ) if output_hidden_states: __A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' snake_case_ = RegNetConfig def __init__( self : int ,A : Optional[int] ,**A : Dict ): super().__init__(**A ) __A = config __A = TFRegNetEmbeddings(A ,name="embedder" ) __A 
= TFRegNetEncoder(A ,name="encoder" ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) @unpack_inputs def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.embedder(A ,training=A ) __A = self.encoder( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = encoder_outputs[0] __A = self.pooler(A ) # Change to NCHW output format have uniformity in the modules __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = RegNetConfig snake_case_ = "regnet" snake_case_ = "pixel_values" @property def UpperCamelCase_ ( self : Optional[Any] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ): super().__init__(A ,*A ,**A ) __A = TFRegNetMainLayer(A ,name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ): super().__init__(A ,*A ,**A ) __A = config.num_labels __A = TFRegNetMainLayer(A ,name="regnet" ) # classification head __A = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = outputs.pooler_output if return_dict else outputs[1] __A = self.classifier[0](A ) __A = self.classifier[1](A ) __A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A ) if not return_dict: __A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
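One detail worth isolating from TFRegNetMainLayer: Keras Conv2D cannot run NCHW input on CPU, so the layer transposes to NHWC on the way in and back to NCHW on the way out. A minimal reproduction (filter count and input size are arbitrary):

import tensorflow as tf

pixel_values = tf.random.normal((2, 3, 224, 224))      # NCHW, as the public API receives it
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))   # -> (2, 224, 224, 3) for Conv2D
features = tf.keras.layers.Conv2D(32, 3, strides=2, padding="same")(nhwc)
nchw_out = tf.transpose(features, perm=(0, 3, 1, 2))   # back to NCHW for the caller
print(nchw_out.shape)                                  # (2, 32, 112, 112)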
15
1
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Optional[int] ): debug_launcher(test_script.main ) def UpperCamelCase_ ( self : Optional[Any] ): debug_launcher(test_ops.main )
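For context, debug_launcher runs the given function in spawned CPU subprocesses, a lightweight stand-in for `accelerate launch` inside unit tests. A hypothetical direct use; the num_processes keyword is assumed from accelerate's public API:

from accelerate import debug_launcher

def main():
    print("hello from a launched process")

if __name__ == "__main__":
    # Spawns the function in 2 CPU processes with a distributed env set up.
    debug_launcher(main, num_processes=2)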
15
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
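A quick check of the hybrid sort above, assuming the entry point is named `sort` as at its call site; a six-element list sits below the size_threshold of 16, so this exercises the insertion-sort path:

print(sort([9.0, 1.5, -3.0, 7.0, 0.0, 2.2]))
# [-3.0, 0.0, 1.5, 2.2, 7.0, 9.0]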
15
1
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__) class UpperCAmelCase : '''simple docstring''' snake_case_ = "dummy_data" snake_case_ = "datasets" snake_case_ = False def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,): __A = 0 __A = dataset_name __A = cache_dir __A = use_local_dummy_data __A = config # download_callbacks take a single url as input __A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __A = str(A ) # to be downloaded __A = None __A = None @property def UpperCamelCase_ ( self : Union[str, Any] ): if self._dummy_file is None: __A = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase_ ( self : Optional[Any] ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def UpperCamelCase_ ( self : List[Any] ): return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def UpperCamelCase_ ( self : Tuple ): __A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __A = cached_path( A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A ) return os.path.join(A ,self.dummy_file_name ) @property def UpperCamelCase_ ( self : str ): return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCamelCase_ ( self : Any ): if self._bucket_url is None: __A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def UpperCamelCase_ ( self : Tuple ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ): if self.load_existing_dummy_data: # dummy data is downloaded and tested __A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __A = self.dummy_file_name # special case when data_url is a dict if isinstance(A ,A ): return self.create_dummy_data_dict(A ,A ) elif isinstance(A ,(list, tuple) ): return self.create_dummy_data_list(A ,A ) else: return self.create_dummy_data_single(A ,A ) def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ): return path def UpperCamelCase_ ( self : str ): return {} def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ): __A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(A ,A ): for single_url in single_urls: download_callback(A ) else: __A = single_urls download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(A ,A ): __A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls] else: __A = single_urls __A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) __A = value # make sure that values are unique if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ): __A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url ) __A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __A = [data_url[0]] * len(A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(A ) return dummy_data_list def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ): for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def UpperCamelCase_ ( self : int ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ): def _iter_archive_members(A : Optional[Any] ): # this preserves the order of the members inside the ZIP archive __A = Path(self.dummy_file ).parent __A = path.relative_to(A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(A ) __A = Path(A ) __A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(A ).as_posix(), file_path.open("rb" ) def UpperCamelCase_ ( self : List[Any] ,A : Any ): if not isinstance(A ,A ): __A = [paths] for path in paths: if os.path.isfile(A ): if os.path.basename(A ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(A ): if os.path.basename(A ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(A ): if filename.startswith((".", "__") ): continue yield os.path.join(A ,A )
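The filename mapping used throughout the manager reduces each remote URL to its final path component and quotes it to be filesystem-safe. In isolation (the example URL is made up):

import os
import urllib.parse
from pathlib import Path

def dummy_path(base: str, url: str) -> str:
    # Keep only the last path segment and percent-encode unsafe characters.
    return os.path.join(base, urllib.parse.quote_plus(Path(url).name))

print(dummy_path("dummy_data", "https://example.com/data/train.json?rev=2"))
# dummy_data/train.json%3Frev%3D2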
15
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml SCREAMING_SNAKE_CASE :Optional[int] = NewType('DataClass', Any) SCREAMING_SNAKE_CASE :int = NewType('DataClassType', Any) def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" if isinstance(a_ , a_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def UpperCAmelCase ( a_ ) -> Callable[[str], Any]: """simple docstring""" __A = {str(a_ ): choice for choice in choices} return lambda a_ : str_to_choice.get(a_ , a_ ) def UpperCAmelCase ( *, a_ = None , a_ = None , a_ = dataclasses.MISSING , a_ = dataclasses.MISSING , a_ = None , **a_ , ) -> dataclasses.Field: """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __A = {} if aliases is not None: __A = aliases if help is not None: __A = help return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_ ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 def __init__( self : Union[str, Any] ,A : Union[DataClassType, Iterable[DataClassType]] ,**A : List[Any] ): # To make the default appear when using --help if "formatter_class" not in kwargs: __A = ArgumentDefaultsHelpFormatter super().__init__(**A ) if dataclasses.is_dataclass(A ): __A = [dataclass_types] __A = list(A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A ) @staticmethod def UpperCamelCase_ ( A : ArgumentParser ,A : dataclasses.Field ): __A = f'''--{field.name}''' __A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) __A = kwargs.pop("aliases" ,[] ) if isinstance(A ,A ): __A = [aliases] __A = getattr(field.type ,"__origin__" ,field.type ) if origin_type is Union or (hasattr(A ,"UnionType" ) and isinstance(A ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
f''' Problem encountered in field \'{field.name}\'.''' ) if type(A ) not in field.type.__args__: # filter `str` in Union __A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __A = getattr(field.type ,"__origin__" ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __A = ( field.type.__args__[0] if isinstance(A ,field.type.__args__[1] ) else field.type.__args__[1] ) __A = getattr(field.type ,"__origin__" ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __A = {} if origin_type is Literal or (isinstance(field.type ,A ) and issubclass(field.type ,A )): if origin_type is Literal: __A = field.type.__args__ else: __A = [x.value for x in field.type] __A = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: __A = field.default else: __A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __A = copy(A ) # Hack because type=bool in argparse does not behave as we want. __A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. __A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __A = default # This tells argparse we accept 0 or 1 value after --field_name __A = "?" # This is the value that will get picked if we do --field_name (without value) __A = True elif isclass(A ) and issubclass(A ,A ): __A = field.type.__args__[0] __A = "+" if field.default_factory is not dataclasses.MISSING: __A = field.default_factory() elif field.default is dataclasses.MISSING: __A = True else: __A = field.type if field.default is not dataclasses.MISSING: __A = field.default elif field.default_factory is not dataclasses.MISSING: __A = field.default_factory() else: __A = True parser.add_argument(A ,*A ,**A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __A = False parser.add_argument(f'''--no_{field.name}''' ,action="store_false" ,dest=field.name ,**A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : DataClassType ): if hasattr(A ,"_argument_group_name" ): __A = self.add_argument_group(dtype._argument_group_name ) else: __A = self try: __A = get_type_hints(A ) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A ): __A = ".".join(map(A ,sys.version_info[:3] ) ) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. 
Try removing ''' "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(A ): if not field.init: continue __A = type_hints[field.name] self._parse_dataclass_field(A ,A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : List[Any]=None ,A : List[Any]=False ,A : Optional[Any]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __A = [] if args_filename: args_files.append(Path(A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __A = ArgumentParser() args_file_parser.add_argument(A ,type=A ,action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) __A , __A = args_file_parser.parse_known_args(args=A ) __A = vars(A ).get(args_file_flag.lstrip("-" ) ,A ) if cmd_args_file_paths: args_files.extend([Path(A ) for p in cmd_args_file_paths] ) __A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __A = file_args + args if args is not None else file_args + sys.argv[1:] __A , __A = self.parse_known_args(args=A ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in vars(A ).items() if k in keys} for k in keys: delattr(A ,A ) __A = dtype(**A ) outputs.append(A ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def UpperCamelCase_ ( self : Dict ,A : Dict[str, Any] ,A : bool = False ): __A = set(args.keys() ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __A = dtype(**A ) outputs.append(A ) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(A )}''' ) return tuple(A ) def UpperCamelCase_ ( self : List[str] ,A : str ,A : bool = False ): with open(Path(A ) ,encoding="utf-8" ) as open_json_file: __A = json.loads(open_json_file.read() ) __A = self.parse_dict(A ,allow_extra_keys=A ) return tuple(A ) def UpperCamelCase_ ( self : int ,A : str ,A : bool = False ): __A = self.parse_dict(yaml.safe_load(Path(A ).read_text() ) ,allow_extra_keys=A ) return tuple(A )
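A stripped-down sketch of the core idea above, using only the standard library: each dataclass field becomes a `--field_name` option whose type and default come from the field itself. The dataclass and helper are invented for illustration and skip all of the Union/bool/Literal handling in the full parser:

import dataclasses
from argparse import ArgumentParser

@dataclasses.dataclass
class TrainArgs:  # hypothetical dataclass, invented for illustration
    learning_rate: float = 5e-5
    epochs: int = 3
    run_name: str = "demo"

def build_parser(dtype):
    # Map each dataclass field to a `--field_name` option, as the parser above does
    parser = ArgumentParser()
    for field in dataclasses.fields(dtype):
        parser.add_argument(f"--{field.name}", type=field.type, default=field.default)
    return parser

args = build_parser(TrainArgs).parse_args(["--epochs", "5"])
print(TrainArgs(**vars(args)))  # TrainArgs(learning_rate=5e-05, epochs=5, run_name='demo')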
15
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :str = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "sew-d" def __init__( self : List[str] ,A : Dict=32 ,A : Optional[int]=7_68 ,A : Dict=12 ,A : Union[str, Any]=12 ,A : Any=30_72 ,A : List[str]=2 ,A : List[str]=5_12 ,A : str=2_56 ,A : int=True ,A : Any=True ,A : Tuple=("p2c", "c2p") ,A : str="layer_norm" ,A : List[str]="gelu_python" ,A : Union[str, Any]=0.1 ,A : Optional[Any]=0.1 ,A : Dict=0.1 ,A : Optional[int]=0.0 ,A : List[str]=0.1 ,A : int=0.02 ,A : Any=1E-7 ,A : List[Any]=1E-5 ,A : str="group" ,A : int="gelu" ,A : List[str]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) ,A : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,A : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,A : Union[str, Any]=False ,A : Dict=1_28 ,A : Dict=16 ,A : Optional[Any]=True ,A : List[str]=0.05 ,A : Dict=10 ,A : Tuple=2 ,A : List[Any]=0.0 ,A : List[str]=10 ,A : List[Any]=0 ,A : Dict="mean" ,A : str=False ,A : int=False ,A : Optional[int]=2_56 ,A : Optional[int]=0 ,A : List[Any]=1 ,A : Tuple=2 ,**A : Dict ,): super().__init__(**A ,pad_token_id=A ,bos_token_id=A ,eos_token_id=A ) __A = hidden_size __A = feat_extract_norm __A = feat_extract_activation __A = list(A ) __A = list(A ) __A = list(A ) __A = conv_bias __A = num_conv_pos_embeddings __A = num_conv_pos_embedding_groups __A = len(self.conv_dim ) __A = num_hidden_layers __A = intermediate_size __A = squeeze_factor __A = max_position_embeddings __A = position_buckets __A = share_att_key __A = relative_attention __A = norm_rel_ebd __A = list(A ) __A = hidden_act __A = num_attention_heads __A = hidden_dropout __A = attention_dropout __A = activation_dropout __A = feat_proj_dropout __A = final_dropout __A = layer_norm_eps __A = feature_layer_norm_eps __A = initializer_range __A = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __A = apply_spec_augment __A = mask_time_prob __A = mask_time_length __A = mask_time_min_masks __A = mask_feature_prob __A = mask_feature_length __A = mask_feature_min_masks # ctc loss __A = ctc_loss_reduction __A = ctc_zero_infinity # sequence classification __A = use_weighted_layer_sum __A = classifier_proj_size @property def UpperCamelCase_ ( self : List[str] ): return functools.reduce(operator.mul ,self.conv_stride ,1 )
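The final property multiplies the convolutional strides to get the model's overall downsampling factor. A standalone check with the default strides copied from the signature above — it works out to one output frame per 320 input samples (20 ms at 16 kHz):

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # defaults from the config above
downsampling = functools.reduce(operator.mul, conv_stride, 1)
print(downsampling)  # 320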
15
SCREAMING_SNAKE_CASE :Any = 256
# Modulus to hash a string
SCREAMING_SNAKE_CASE :Union[str, Any] = 100_0003


def UpperCAmelCase ( a_ , a_ ) -> bool:
    """simple docstring"""
    __A = len(a_ )
    __A = len(a_ )
    if p_len > t_len:
        return False

    __A = 0
    __A = 0
    __A = 1

    # Calculating the hash of pattern and substring of text
    for i in range(a_ ):
        __A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        __A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        __A = (modulus_power * alphabet_size) % modulus

    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        __A = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus

    return False


def UpperCAmelCase ( ) -> None:
    """simple docstring"""
    __A = "abc1abc12"
    __A = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    __A = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ )

    # Test 2)
    __A = "ABABX"
    __A = "ABABZABABYABABX"
    assert rabin_karp(a_ , a_ )

    # Test 3)
    __A = "AAAB"
    __A = "ABAAAAAB"
    assert rabin_karp(a_ , a_ )

    # Test 4)
    __A = "abcdabcy"
    __A = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(a_ , a_ )

    # Test 5)
    __A = "Lü"
    __A = "Lüsai"
    assert rabin_karp(a_ , a_ )
    __A = "Lue"
    assert not rabin_karp(a_ , a_ )
    print("Success." )


if __name__ == "__main__":
    test_rabin_karp()
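A standalone check that the rolling-hash update above agrees with a hash recomputed from scratch for every window. The constants are copied from the module; all other names are local to the sketch:

ALPHABET_SIZE = 256
MODULUS = 100_0003

def window_hash(s):
    h = 0
    for ch in s:
        h = (ord(ch) + h * ALPHABET_SIZE) % MODULUS
    return h

text = "abcdef"
p_len = 3
power = pow(ALPHABET_SIZE, p_len - 1, MODULUS)  # weight of the outgoing character
h = window_hash(text[:p_len])
for i in range(len(text) - p_len):
    # drop text[i], shift one base position, append text[i + p_len] -- the update above
    h = ((h - ord(text[i]) * power) * ALPHABET_SIZE + ord(text[i + p_len])) % MODULUS
    assert h == window_hash(text[i + 1 : i + 1 + p_len])
print("rolling hash matches recomputed hash")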
15
1
SCREAMING_SNAKE_CASE :int = {
    0: '0', 1: '1', 2: '2', 3: '3',
    4: '4', 5: '5', 6: '6', 7: '7',
    8: '8', 9: '9', 10: 'a', 11: 'b',
    12: 'c', 13: 'd', 14: 'e', 15: 'f',
}


def UpperCAmelCase ( a_ ) -> str:
    """simple docstring"""
    assert type(a_ ) in (int, float) and decimal == int(a_ )
    __A = int(a_ )
    __A = ""
    __A = False
    if decimal < 0:
        __A = True
        decimal *= -1
    while decimal > 0:
        __A , __A = divmod(a_ , 1_6 )
        __A = values[remainder] + hexadecimal
    __A = "0x" + hexadecimal
    if negative:
        __A = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
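A quick sanity check against the built-in hex, with the converter rewritten under a readable name; the alias and its internals are for illustration only:

def decimal_to_hexadecimal(decimal):  # readable alias for the converter above
    values = "0123456789abcdef"
    negative = decimal < 0
    decimal = abs(int(decimal))
    digits = ""
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        digits = values[remainder] + digits
    out = "0x" + digits
    return "-" + out if negative else out

assert decimal_to_hexadecimal(5973) == hex(5973) == "0x1755"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"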
15
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel SCREAMING_SNAKE_CASE :Union[str, Any] = False SCREAMING_SNAKE_CASE :Any = True SCREAMING_SNAKE_CASE :Tuple = False if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE :Dict = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } SCREAMING_SNAKE_CASE :Optional[int] = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: SCREAMING_SNAKE_CASE :Dict = reader.read() SCREAMING_SNAKE_CASE :List[str] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel(**config) else: SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel SCREAMING_SNAKE_CASE :List[str] = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) SCREAMING_SNAKE_CASE :List[str] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: SCREAMING_SNAKE_CASE :Optional[Any] = config[key] del config[key] SCREAMING_SNAKE_CASE :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']] SCREAMING_SNAKE_CASE :List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: SCREAMING_SNAKE_CASE :Tuple = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) SCREAMING_SNAKE_CASE :Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue SCREAMING_SNAKE_CASE :List[str] = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: SCREAMING_SNAKE_CASE :List[Any] = param_value SCREAMING_SNAKE_CASE :str = True if not has_changed: SCREAMING_SNAKE_CASE :List[str] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
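The renaming passes above just move values between keys of a plain dict. In miniature, with toy config values and mapping entries copied from the script:

legacy_config = {"image_size": 64, "num_res_blocks": 2, "resnet_eps": 1e-5}
renames = {
    "image_size": "sample_size",
    "num_res_blocks": "layers_per_block",
    "resnet_eps": "norm_eps",
}

for old_key, new_key in renames.items():
    if old_key in legacy_config:
        legacy_config[new_key] = legacy_config.pop(old_key)

print(legacy_config)  # {'sample_size': 64, 'layers_per_block': 2, 'norm_eps': 1e-05}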
15
1
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def UpperCAmelCase ( a_ ) -> Tuple: """simple docstring""" __A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(a_ , a_ ) def UpperCAmelCase ( a_ ) -> Any: """simple docstring""" __A , __A = emb.weight.shape __A = nn.Linear(a_ , a_ , bias=a_ ) __A = emb.weight.data return lin_layer def UpperCAmelCase ( a_ , a_=None ) -> Union[str, Any]: """simple docstring""" __A = {} for old_key in state_dict.keys(): __A = old_key if "moe_layer.experts." in key: if expert_idx is not None: __A = key.replace("moe_layer.experts.0" , F'''ffn.experts.expert_{expert_idx}''' ) else: __A = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: __A = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: __A = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: __A = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: __A = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: __A = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: __A = key.replace("final_layer_norm" , "ff_layer_norm" ) __A = state_dict[old_key] return new_dict def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = WEIGHTS_NAME ) -> Union[str, Any]: """simple docstring""" __A = [] __A = 0 os.makedirs(a_ , exist_ok=a_ ) for expert in range(a_ ): __A = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(a_ ): __A = torch.load(a_ )["model"] remove_ignore_keys_(a_ ) __A = rename_fairseq_keys(a_ , a_ ) __A = os.path.join( a_ , weights_name.replace(".bin" , F'''-{len(a_ )+1:05d}-of-???.bin''' ) ) torch.save(a_ , a_ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(a_ )[0]].dtype ) # Add the last block __A = os.path.join(a_ , weights_name.replace(".bin" , F'''-{len(a_ )+1:05d}-of-???.bin''' ) ) __A = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(a_ ) __A = rename_fairseq_keys(a_ , a_ ) __A = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(a_ ) == 1: __A = os.path.join(a_ , a_ ) torch.save(a_ , a_ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(a_ , a_ ) # Otherwise, let's build the index __A = {} for idx, shard in enumerate(a_ ): __A = weights_name.replace(".bin" , F'''-{idx+1:05d}-of-{len(a_ ):05d}.bin''' ) __A = os.path.join(a_ , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(a_ , os.path.join(a_ , a_ ) ) for key in shard: __A = shard_file # Add the metadata __A = {"total_size": total_size} __A = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a_ , a_ ) , "w" , encoding="utf-8" ) as f: __A = json.dumps(a_ , indent=2 , sort_keys=a_ ) + "\n" f.write(a_ ) return metadata, index if __name__ == "__main__": SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser() # 
Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Dict = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) SCREAMING_SNAKE_CASE :Dict = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) SCREAMING_SNAKE_CASE :Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
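The shard files above are first written with an `-of-???` placeholder and renamed once the shard count is known, and the final index maps every parameter name to its shard file. A toy version of that bookkeeping (parameter names and the size value are invented):

shard_param_names = [["encoder.w1", "encoder.w2"], ["decoder.w1"]]  # keys per shard
total = len(shard_param_names)
weight_map = {}
for idx, keys in enumerate(shard_param_names):
    shard_file = f"pytorch_model-{idx + 1:05d}-of-{total:05d}.bin"
    for key in keys:
        weight_map[key] = shard_file

index = {"metadata": {"total_size": 12345}, "weight_map": weight_map}
print(index["weight_map"]["decoder.w1"])  # pytorch_model-00002-of-00002.bin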
15
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def UpperCAmelCase ( a_ ) -> str:
    """simple docstring"""
    __A = {}

    __A = job["started_at"]
    __A = job["completed_at"]

    __A = date_parser.parse(a_ )
    __A = date_parser.parse(a_ )
    __A = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    __A = start
    __A = end
    __A = duration_in_min

    return job_info


def UpperCAmelCase ( a_ , a_=None ) -> str:
    """simple docstring"""
    __A = None
    if token is not None:
        __A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    __A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    __A = requests.get(a_ , headers=a_ ).json()
    __A = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
        __A = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )

        for i in range(a_ ):
            __A = requests.get(url + F'''&page={i + 2}''' , headers=a_ ).json()
            job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )

        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )

    return {}


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()

    SCREAMING_SNAKE_CASE :Union[str, Any] = get_job_time(args.workflow_run_id)
    SCREAMING_SNAKE_CASE :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
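The per-job arithmetic above in isolation: parse the two ISO timestamps and round the difference to minutes. The payload is a made-up sample:

import dateutil.parser as date_parser

job = {"started_at": "2023-05-01T12:00:00Z", "completed_at": "2023-05-01T12:34:00Z"}
start = date_parser.parse(job["started_at"])
end = date_parser.parse(job["completed_at"])
print(round((end - start).total_seconds() / 60.0))  # 34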
15
1
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self : str ): __A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A ,"embed_dim" ) ) self.parent.assertTrue(hasattr(A ,"num_heads" ) ) class UpperCAmelCase : '''simple docstring''' def __init__( self : str ,A : Tuple ,A : Dict=13 ,A : Optional[int]=64 ,A : Optional[Any]=3 ,A : List[Any]=[16, 48, 96] ,A : Union[str, Any]=[1, 3, 6] ,A : str=[1, 2, 10] ,A : Optional[int]=[7, 3, 3] ,A : List[Any]=[4, 2, 2] ,A : int=[2, 1, 1] ,A : Tuple=[2, 2, 2] ,A : Any=[False, False, True] ,A : Any=[0.0, 0.0, 0.0] ,A : Optional[int]=0.02 ,A : Optional[Any]=1E-12 ,A : List[str]=True ,A : Union[str, Any]=True ,A : List[Any]=2 ,): __A = parent __A = batch_size __A = image_size __A = patch_sizes __A = patch_stride __A = patch_padding __A = is_training __A = use_labels __A = num_labels __A = num_channels __A = embed_dim __A = num_heads __A = stride_kv __A = depth __A = cls_token __A = attention_drop_rate __A = initializer_range __A = layer_norm_eps def UpperCamelCase_ ( self : Dict ): __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __A = None if self.use_labels: # create a random int32 tensor of given shape __A = ids_tensor([self.batch_size] ,self.num_labels ) __A = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : str ): return CvtConfig( image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : Tuple ,A : Any ): __A = TFCvtModel(config=A ) __A = model(A ,training=A ) __A = (self.image_size, self.image_size) __A , __A = image_size[0], image_size[1] for i in range(len(self.depth ) ): __A = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __A = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) ) def UpperCamelCase_ ( self : Union[str, Any] ,A : int ,A : Tuple ,A : List[Any] ): __A = self.num_labels __A = TFCvtForImageClassification(A ) __A = model(A ,labels=A ,training=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.prepare_config_and_inputs() __A , __A , __A = 
config_and_inputs __A = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () snake_case_ = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def UpperCamelCase_ ( self : str ): __A = TFCvtModelTester(self ) __A = TFCvtConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 ) def UpperCamelCase_ ( self : str ): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions" ) def UpperCamelCase_ ( self : Optional[Any] ): pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def UpperCamelCase_ ( self : int ): pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def UpperCamelCase_ ( self : Optional[Any] ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 ,reason="TF does not support backprop for grouped convolutions on CPU." ,) def UpperCamelCase_ ( self : Any ): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 ,reason="TF does not support backprop for grouped convolutions on CPU." 
,) @slow def UpperCamelCase_ ( self : List[str] ): super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" ) def UpperCamelCase_ ( self : List[str] ): __A = tf.keras.mixed_precision.Policy("mixed_float16" ) tf.keras.mixed_precision.set_global_policy(A ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32" ) def UpperCamelCase_ ( self : Union[str, Any] ): __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = model_class(A ) __A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A = [*signature.parameters.keys()] __A = ["pixel_values"] self.assertListEqual(arg_names[:1] ,A ) def UpperCamelCase_ ( self : List[str] ): def check_hidden_states_output(A : Union[str, Any] ,A : int ,A : List[Any] ): __A = model_class(A ) __A = model(**self._prepare_for_class(A ,A ) ) __A = outputs.hidden_states __A = len(self.model_tester.depth ) self.assertEqual(len(A ) ,A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) ,[ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] ,) __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = True check_hidden_states_output(A ,A ,A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A = True check_hidden_states_output(A ,A ,A ) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self : Any ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) @slow def UpperCamelCase_ ( self : Optional[Any] ): for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = TFCvtModel.from_pretrained(A ) self.assertIsNotNone(A ) def UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" __A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self : List[str] ): return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCamelCase_ ( self : str ): __A = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __A = self.default_image_processor __A = prepare_img() __A = image_processor(images=A ,return_tensors="tf" ) # forward pass __A = model(**A ) # verify the logits __A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape ,A ) __A = tf.constant([0.92_85, 0.90_15, -0.31_50] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,A ,atol=1E-4 ) )
15
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" __A = args.pruning_method __A = args.threshold __A = args.model_name_or_path.rstrip("/" ) __A = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) __A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) ) __A = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: __A = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": __A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = TopKBinarizer.apply(a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = ThresholdBinarizer.apply(a_ , a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A , __A = -0.1, 1.1 __A = torch.sigmoid(a_ ) __A = s * (r - l) + l __A = s_bar.clamp(min=0.0 , max=1.0 ) __A = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: __A = os.path.join( os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' ) if not os.path.isdir(a_ ): shutil.copytree(a_ , a_ ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) SCREAMING_SNAKE_CASE :str = parser.parse_args() main(args)
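The `topK` branch above binarizes the learned `mask_scores` so only the highest-scoring fraction of weights survives; the real `TopKBinarizer` comes from the `emmental` package imported by the script. A standalone torch approximation, assuming `keep_ratio` plays the role of the script's `--threshold`:

import torch

def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    """Return a 0/1 mask keeping the `keep_ratio` fraction of highest scores."""
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = torch.topk(scores.flatten(), k).values.min()
    return (scores >= threshold).to(scores.dtype)

scores = torch.tensor([[0.9, 0.1], [0.4, 0.8]])
mask = topk_mask(scores, keep_ratio=0.5)
print(mask)  # tensor([[1., 0.], [0., 1.]])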
15
1
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : '''simple docstring''' def __init__( self : int ,A : Tuple ,A : List[str]=13 ,A : List[str]=7 ,A : str=True ,A : int=True ,A : Union[str, Any]=True ,A : str=True ,A : Dict=99 ,A : Optional[Any]=64 ,A : Union[str, Any]=32 ,A : Dict=5 ,A : Union[str, Any]=4 ,A : str=37 ,A : int="gelu" ,A : str=0.1 ,A : List[Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Tuple=16 ,A : List[Any]=2 ,A : Any=0.02 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : List[str]=None ,): __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = embedding_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope def UpperCamelCase_ ( self : List[Any] ): __A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __A = ids_tensor([self.batch_size] ,self.num_choices ) __A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Any ): return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self : Dict ,A : Tuple ,A : int ,A : List[Any] ,A : Optional[Any] ,A : Optional[int] ,A : int ,A : int ): __A = MobileBertModel(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ) __A = model(A ,token_type_ids=A ) __A = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def UpperCamelCase_ ( self : int ,A : Dict ,A : int ,A : 
List[Any] ,A : Optional[Any] ,A : Tuple ,A : str ,A : str ): __A = MobileBertForMaskedLM(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Any ,A : Optional[Any] ,A : Optional[Any] ,A : Dict ,A : Optional[int] ,A : Any ,A : Optional[Any] ,A : Any ): __A = MobileBertForNextSentencePrediction(config=A ) model.to(A ) model.eval() __A = model( A ,attention_mask=A ,token_type_ids=A ,labels=A ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def UpperCamelCase_ ( self : Dict ,A : int ,A : str ,A : Optional[Any] ,A : Optional[int] ,A : Dict ,A : Tuple ,A : Optional[int] ): __A = MobileBertForPreTraining(config=A ) model.to(A ) model.eval() __A = model( A ,attention_mask=A ,token_type_ids=A ,labels=A ,next_sentence_label=A ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def UpperCamelCase_ ( self : int ,A : Tuple ,A : Any ,A : Dict ,A : List[Any] ,A : Union[str, Any] ,A : Optional[int] ,A : Optional[int] ): __A = MobileBertForQuestionAnswering(config=A ) model.to(A ) model.eval() __A = model( A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Tuple ,A : List[str] ,A : List[str] ,A : Optional[Any] ,A : List[Any] ,A : List[str] ,A : Dict ,A : Tuple ): __A = self.num_labels __A = MobileBertForSequenceClassification(A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ,A : int ,A : Union[str, Any] ): __A = self.num_labels __A = MobileBertForTokenClassification(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Optional[int] ,A : int ,A : str ,A : Optional[int] ,A : List[Any] ,A : Optional[Any] ,A : Optional[Any] ,A : Optional[Any] ): __A = self.num_choices __A = MobileBertForMultipleChoice(config=A ) model.to(A ) model.eval() __A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __A = model( A ,attention_mask=A ,token_type_ids=A ,labels=A ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self : Tuple ): __A = self.prepare_config_and_inputs() ( ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ) = config_and_inputs __A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, 
MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True def UpperCamelCase_ ( self : List[str] ,A : Tuple ,A : List[Any] ,A : Union[str, Any]=False ): __A = super()._prepare_for_class(A ,A ,return_labels=A ) if return_labels: if model_class in get_values(A ): __A = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=A ) __A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A ) return inputs_dict def UpperCamelCase_ ( self : int ): __A = MobileBertModelTester(self ) __A = ConfigTester(self ,config_class=A ,hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Any ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*A ) def UpperCamelCase_ ( self : Union[str, Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*A ) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*A ) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*A ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*A ) def UpperCamelCase_ ( self : Any ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*A ) def UpperCamelCase_ ( self : str ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*A ) def UpperCamelCase_ ( self : int ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*A ) def UpperCAmelCase ( a_ ) -> str: """simple docstring""" return torch.tensor( a_ , dtype=torch.long , device=a_ , ) SCREAMING_SNAKE_CASE :List[str] = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self : Tuple ): __A = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(A ) __A = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __A = model(A )[0] __A = torch.Size((1, 9, 5_12) ) self.assertEqual(output.shape ,A ) __A = torch.tensor( [ [ [-2.4736526E07, 8.2691656E04, 1.6521838E05], [-5.7541704E-01, 3.9056022E00, 4.4011507E00], [2.6047359E00, 1.5677652E00, -1.7324188E-01], ] ] ,device=A ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. 
We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE __A = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) __A = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
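That bound check in isolation: because MobileBERT activations span roughly 1e0 to 1e8, the test compares the element-wise ratio to 1 rather than an absolute difference. A numpy sketch with made-up outputs and the module's TOLERANCE of 1e-3:

import numpy as np

TOLERANCE = 1e-3
expected = np.array([1.0e8, 3.9, -0.17])
result = np.array([1.00005e8, 3.9002, -0.16999])  # invented model outputs

ratio = expected / result
assert np.all(ratio >= 1 - TOLERANCE) and np.all(ratio <= 1 + TOLERANCE)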
15
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Union[str, Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } SCREAMING_SNAKE_CASE :int = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = [] def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,): __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def UpperCamelCase_ ( self : List[str] ): return self.sp_model.get_piece_size() def UpperCamelCase_ ( self : Optional[Any] ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : str ,A : Optional[Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Tuple ): return self.sp_model.piece_to_id(A ) def UpperCamelCase_ ( self : List[Any] ,A : Tuple ): __A = self.sp_model.IdToPiece(A ) return token def UpperCamelCase_ ( self : List[Any] ,A : int ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,): __A = kwargs.pop("use_source_tokenizer" ,A ) __A = self.convert_ids_to_tokens(A ,skip_special_tokens=A ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __A = [] __A = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) __A = [] sub_texts.append(A ) else: current_sub_text.append(A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) ) else: __A = "".join(A ) __A = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __A = self.clean_up_tokenization(A ) return clean_text else: return text def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
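How the last three methods lay out special tokens for single sequences and pairs — `[CLS] A [SEP]` and `[CLS] A [SEP] B [SEP]` — rendered with string tokens instead of ids (toy inputs):

cls, sep = ["[CLS]"], ["[SEP]"]
seq_a = ["▁hello", "▁world"]
seq_b = ["▁again"]

with_special = cls + seq_a + sep + seq_b + sep
special_mask = [1] + [0] * len(seq_a) + [1] + [0] * len(seq_b) + [1]
token_type_ids = [0] * len(cls + seq_a + sep) + [1] * len(seq_b + sep)

print(with_special)    # ['[CLS]', '▁hello', '▁world', '[SEP]', '▁again', '[SEP]']
print(special_mask)    # [1, 0, 0, 1, 0, 1]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]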
15
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


SCREAMING_SNAKE_CASE :Optional[int] = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE :str = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
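A minimal stdlib equivalent of the `_LazyModule` idea, using a module-level `__getattr__` (PEP 562) so heavy submodules are imported only on first attribute access. The package and submodule names here are hypothetical:

# lazy_pkg/__init__.py -- hypothetical package illustrating the same pattern
import importlib

_import_structure = {"modeling": ["MyModel"], "configuration": ["MyConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Runs only on first access, e.g. `from lazy_pkg import MyModel`
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)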
15
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    SCREAMING_SNAKE_CASE :Any = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE :int = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def UpperCAmelCase ( a_ ) -> Optional[Any]:
    """simple docstring"""
    __A = (images / 2 + 0.5).clamp(0 , 1 )
    __A = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    __A = numpy_to_pil(a_ )
    return images


def UpperCAmelCase ( a_ ) -> int:
    """simple docstring"""
    if images.ndim == 3:
        __A = images[None, ...]
    __A = (images * 2_5_5).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        __A = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        __A = [Image.fromarray(a_ ) for image in images]
    return pil_images
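Exercising the uint8 conversion above end to end on random channels-last data; all names are local to the sketch:

import numpy as np
from PIL import Image

images = np.random.rand(2, 8, 8, 3)           # batch in [0, 1], channels last
as_uint8 = (images * 255).round().astype("uint8")
pil_images = [Image.fromarray(img) for img in as_uint8]
print(pil_images[0].size, pil_images[0].mode)  # (8, 8) RGB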
15
1
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE :Optional[int] = '▁' SCREAMING_SNAKE_CASE :List[Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = BigBirdTokenizer snake_case_ = BigBirdTokenizerFast snake_case_ = True snake_case_ = True def UpperCamelCase_ ( self : Dict ): super().setUp() __A = self.tokenizer_class(A ,keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self : Optional[Any] ): __A = "<s>" __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def UpperCamelCase_ ( self : int ): __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<unk>" ) self.assertEqual(vocab_keys[1] ,"<s>" ) self.assertEqual(vocab_keys[-1] ,"[MASK]" ) self.assertEqual(len(A ) ,10_04 ) def UpperCamelCase_ ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def UpperCamelCase_ ( self : str ): if not self.test_rust_tokenizer: return __A = self.get_tokenizer() __A = self.get_rust_tokenizer() __A = "I was born in 92000, and this is falsé." __A = tokenizer.tokenize(A ) __A = rust_tokenizer.tokenize(A ) self.assertListEqual(A ,A ) __A = tokenizer.encode(A ,add_special_tokens=A ) __A = rust_tokenizer.encode(A ,add_special_tokens=A ) self.assertListEqual(A ,A ) __A = self.get_rust_tokenizer() __A = tokenizer.encode(A ) __A = rust_tokenizer.encode(A ) self.assertListEqual(A ,A ) def UpperCamelCase_ ( self : Any ): __A = BigBirdTokenizer(A ,keep_accents=A ) __A = tokenizer.tokenize("This is a test" ) self.assertListEqual(A ,["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) ,[2_85, 46, 10, 1_70, 3_82] ,) __A = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( A ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] ,) __A = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,) __A = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] ,) @cached_property def UpperCamelCase_ ( self : str ): return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def UpperCamelCase_ ( self : List[str] ): __A = "Hello World!" __A = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(A ,self.big_tokenizer.encode(A ) ) @slow def UpperCamelCase_ ( self : Dict ): __A = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off __A = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(A ,self.big_tokenizer.encode(A ) ) @require_torch @slow def UpperCamelCase_ ( self : Any ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence __A = list(self.big_tokenizer.get_vocab().keys() )[:10] __A = " ".join(A ) __A = self.big_tokenizer.encode_plus(A ,return_tensors="pt" ,return_token_type_ids=A ) __A = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] ,return_tensors="pt" ,return_token_type_ids=A ) __A = BigBirdConfig(attention_type="original_full" ) __A = BigBirdModel(A ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**A ) model(**A ) @slow def UpperCamelCase_ ( self : List[str] ): __A = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) __A = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def UpperCamelCase_ ( self : str ): # fmt: off __A = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="google/bigbird-roberta-base" ,revision="215c99f1600e06f83acce68422f2035b2b5c3510" ,)
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[Any] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "yolos" def __init__( self : Any ,A : Optional[Any]=7_68 ,A : Dict=12 ,A : Any=12 ,A : str=30_72 ,A : Any="gelu" ,A : str=0.0 ,A : List[str]=0.0 ,A : Dict=0.02 ,A : int=1E-12 ,A : Tuple=[5_12, 8_64] ,A : List[Any]=16 ,A : str=3 ,A : str=True ,A : Any=1_00 ,A : Dict=True ,A : Dict=False ,A : Tuple=1 ,A : Union[str, Any]=5 ,A : Optional[Any]=2 ,A : Union[str, Any]=5 ,A : int=2 ,A : int=0.1 ,**A : List[str] ,): super().__init__(**A ) __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = initializer_range __A = layer_norm_eps __A = image_size __A = patch_size __A = num_channels __A = qkv_bias __A = num_detection_tokens __A = use_mid_position_embeddings __A = auxiliary_loss # Hungarian matcher __A = class_cost __A = bbox_cost __A = giou_cost # Loss coefficients __A = bbox_loss_coefficient __A = giou_loss_coefficient __A = eos_coefficient class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = version.parse("1.11" ) @property def UpperCamelCase_ ( self : str ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase_ ( self : List[Any] ): return 1E-4 @property def UpperCamelCase_ ( self : Optional[Any] ): return 12
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "AutoTokenizer" snake_case_ = ["tokenizer"] snake_case_ = { "semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2, } def __init__( self : Optional[Any] ,A : Any ,A : Union[str, Any]=None ): super().__init__(A ) __A = speaker_embeddings @classmethod def UpperCamelCase_ ( cls : Any ,A : str ,A : List[str]="speaker_embeddings_path.json" ,**A : Optional[int] ): if speaker_embeddings_dict_path is not None: __A = get_file_from_repo( A ,A ,subfolder=kwargs.pop("subfolder" ,A ) ,cache_dir=kwargs.pop("cache_dir" ,A ) ,force_download=kwargs.pop("force_download" ,A ) ,proxies=kwargs.pop("proxies" ,A ) ,resume_download=kwargs.pop("resume_download" ,A ) ,local_files_only=kwargs.pop("local_files_only" ,A ) ,use_auth_token=kwargs.pop("use_auth_token" ,A ) ,revision=kwargs.pop("revision" ,A ) ,) if speaker_embeddings_path is None: logger.warning( f'''`{os.path.join(A ,A )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) __A = None else: with open(A ) as speaker_embeddings_json: __A = json.load(A ) else: __A = None __A = AutoTokenizer.from_pretrained(A ,**A ) return cls(tokenizer=A ,speaker_embeddings=A ) def UpperCamelCase_ ( self : Optional[Any] ,A : str ,A : int="speaker_embeddings_path.json" ,A : List[Any]="speaker_embeddings" ,A : bool = False ,**A : List[str] ,): if self.speaker_embeddings is not None: os.makedirs(os.path.join(A ,A ,"v2" ) ,exist_ok=A ) __A = {} __A = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": __A = self._load_voice_preset(A ) __A = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["repo_or_path"] ,A ,f'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=A ,) __A = os.path.join(A ,f'''{prompt_key}_{key}.npy''' ) __A = tmp_dict with open(os.path.join(A ,A ) ,"w" ) as fp: json.dump(A ,A ) super().save_pretrained(A ,A ,**A ) def UpperCamelCase_ ( self : Dict ,A : str = None ,**A : Optional[int] ): __A = self.speaker_embeddings[voice_preset] __A = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) __A = get_file_from_repo( self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] ,subfolder=kwargs.pop("subfolder" ,A ) ,cache_dir=kwargs.pop("cache_dir" ,A ) ,force_download=kwargs.pop("force_download" ,A ) ,proxies=kwargs.pop("proxies" ,A ) ,resume_download=kwargs.pop("resume_download" ,A ) ,local_files_only=kwargs.pop("local_files_only" ,A ) ,use_auth_token=kwargs.pop("use_auth_token" ,A ) ,revision=kwargs.pop("revision" ,A ) ,) if path is None: raise ValueError( f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) __A = np.load(A ) return voice_preset_dict def UpperCamelCase_ ( 
self : List[str] ,A : Optional[dict] = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] ,np.ndarray ): raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self : List[Any] ,A : str=None ,A : Dict=None ,A : Dict="pt" ,A : List[Any]=2_56 ,A : Optional[int]=False ,A : Optional[Any]=True ,A : Any=False ,**A : Union[str, Any] ,): if voice_preset is not None and not isinstance(A ,A ): if ( isinstance(A ,A ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): __A = self._load_voice_preset(A ) else: if isinstance(A ,A ) and not voice_preset.endswith(".npz" ): __A = voice_preset + ".npz" __A = np.load(A ) if voice_preset is not None: self._validate_voice_preset_dict(A ,**A ) __A = BatchFeature(data=A ,tensor_type=A ) __A = self.tokenizer( A ,return_tensors=A ,padding="max_length" ,max_length=A ,return_attention_mask=A ,return_token_type_ids=A ,add_special_tokens=A ,**A ,) if voice_preset is not None: __A = voice_preset return encoded_text
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin' SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json' SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors' SCREAMING_SNAKE_CASE :str = 'tf_model.h5' SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json' SCREAMING_SNAKE_CASE :str = 'model.ckpt' SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack' SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json' SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors' SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json' SCREAMING_SNAKE_CASE :str = 'config.json' SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json' SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json' SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json' SCREAMING_SNAKE_CASE :Optional[int] = '▁' SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility SCREAMING_SNAKE_CASE :str = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" if version.parse(a_ ) < version.parse(a_ ): if "dev" in min_version: __A = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: __A = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """simple docstring"""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
def longest_distance(graph: dict) -> None:
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
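# One longest chain in the hard-coded DAG above is 1 -> 2 -> 5 -> 6 -> 7,
# i.e. 5 vertices, so the call prints 5. As a cross-check, the sketch below
# recomputes the answer by exhaustive DFS; `longest_chain` is an illustrative
# helper, not part of the original file.
def longest_chain(dag: dict, start: int) -> int:
    # Number of vertices on the longest path that starts at `start`.
    if not dag[start]:
        return 1
    return 1 + max(longest_chain(dag, nxt) for nxt in dag[start])


assert max(longest_chain(graph, v) for v in graph) == 5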
import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py SCREAMING_SNAKE_CASE :int = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE :int = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE :Optional[Any] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') SCREAMING_SNAKE_CASE :str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. SCREAMING_SNAKE_CASE :Tuple = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) SCREAMING_SNAKE_CASE :int = [ ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'), ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'), ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'), ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'), ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'), ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'), ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'), ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'), ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'), ( 'zero-shot-object-detection', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForZeroShotObjectDetection', ), ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'), ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'), ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'), ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'), ( 'table-question-answering', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForTableQuestionAnswering', ), ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'), ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'), ( 'next-sentence-prediction', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES', 'AutoModelForNextSentencePrediction', ), ( 'audio-frame-classification', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioFrameClassification', ), ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'), ( 'document-question-answering', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForDocumentQuestionAnswering', ), ( 'visual-question-answering', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForVisualQuestionAnswering', ), ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'), ( 'zero-shot-image-classification', 
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForZeroShotImageClassification', ), ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'), ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'), ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'), ] def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" __A = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , a_ ) return [m.group(0 ) for m in matches] def UpperCAmelCase ( ) -> int: """simple docstring""" __A = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __A = { config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __A = collections.defaultdict(a_ ) __A = collections.defaultdict(a_ ) __A = collections.defaultdict(a_ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(a_ ): __A = None if _re_tf_models.match(a_ ) is not None: __A = tf_models __A = _re_tf_models.match(a_ ).groups()[0] elif _re_flax_models.match(a_ ) is not None: __A = flax_models __A = _re_flax_models.match(a_ ).groups()[0] elif _re_pt_models.match(a_ ) is not None: __A = pt_models __A = _re_pt_models.match(a_ ).groups()[0] if lookup_dict is not None: while len(a_ ) > 0: if attr_name in model_prefix_to_model_type: __A = True break # Try again after removing the last word in the name __A = "".join(camel_case_split(a_ )[:-1] ) __A = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __A = list(a_ ) all_models.sort() __A = {"model_type": all_models} __A = [pt_models[t] for t in all_models] __A = [tf_models[t] for t in all_models] __A = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __A = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __A = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __A = "AutoTokenizer" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __A = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__A = "AutoTokenizer" __A = [processors[t] for t in all_models] return pd.DataFrame(a_ ) def UpperCAmelCase ( a_ ) -> Any: """simple docstring""" __A = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __A = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}'''] __A = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(a_ , a_ , a_ ): # The type of pipeline may not exist in this framework if not hasattr(a_ , a_ ): continue # First extract all model_names __A = [] for name in getattr(a_ , a_ ).values(): if isinstance(a_ , a_ ): model_names.append(a_ ) else: model_names.extend(list(a_ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" __A = get_frameworks_table() __A = Dataset.from_pandas(a_ ) __A = hf_hub_download( "huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=a_ ) __A = Dataset.from_json(a_ ) __A = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(a_ ) ) } __A = update_pipeline_and_auto_class_table(a_ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. __A = sorted(table.keys() ) __A = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) __A = Dataset.from_pandas(a_ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(a_ , "frameworks.json" ) ) tags_dataset.to_json(os.path.join(a_ , "pipeline_tags.json" ) ) if commit_sha is not None: __A = ( F'''Update with commit {commit_sha}\n\nSee: ''' F'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: __A = "Update" upload_folder( repo_id="huggingface/transformers-metadata" , folder_path=a_ , repo_type="dataset" , token=a_ , commit_message=a_ , ) def UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" __A = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __A = transformers_module.pipelines.SUPPORTED_TASKS __A = [] for key in pipeline_tasks: if key not in in_table: __A = pipeline_tasks[key]["pt"] if isinstance(a_ , (list, tuple) ): __A = model[0] __A = model.__name__ if model not in in_table.values(): missing.append(a_ ) if len(a_ ) > 0: __A = ", ".join(a_ ) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " F'''`utils/update_metadata.py`: {msg}. Please add them!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Optional[int] = argparse.ArgumentParser() parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.') parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.') parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.') SCREAMING_SNAKE_CASE :int = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" __A = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) __A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) __A = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) __A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) __A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) __A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) __A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) __A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) __A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) __A = key.replace("image_encoder.module" , "flava.image_model" ) __A = key.replace("text_encoder.module" , "flava.text_model" ) __A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) __A = key.replace("mm_encoder.module" , "flava.multimodal_model" ) __A = key.replace("text_projection" , "flava.text_projection" ) __A = key.replace("image_projection" , "flava.image_projection" ) __A = value.float() for key, value in codebook_state_dict.items(): __A = value return upgrade @torch.no_grad() def UpperCAmelCase ( a_ , a_ , a_ , a_=None ) -> Tuple: """simple docstring""" if config_path is not None: __A = FlavaConfig.from_pretrained(a_ ) else: __A = FlavaConfig() __A = FlavaForPreTraining(a_ ).eval() __A = convert_dalle_checkpoint(a_ , a_ , save_checkpoint=a_ ) if os.path.exists(a_ ): __A = torch.load(a_ , map_location="cpu" ) else: __A = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" ) __A = upgrade_state_dict(a_ , a_ ) hf_model.load_state_dict(a_ ) __A = hf_model.state_dict() __A = count_parameters(a_ ) __A = count_parameters(a_ ) + count_parameters(a_ ) assert torch.allclose(a_ , a_ , atol=1E-3 ) hf_model.save_pretrained(a_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = None class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 2 @register_to_config def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,): # standard deviation of the initial noise distribution __A = sigma_max # setable values __A = None __A = None __A = None # sigma(t_i) def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ): return sample def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ): __A = num_inference_steps __A = np.arange(0 ,self.num_inference_steps )[::-1].copy() __A = torch.from_numpy(A ).to(A ) __A = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __A = torch.tensor(A ,dtype=torch.floataa ,device=A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ): if self.config.s_min <= sigma <= self.config.s_max: __A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 ) else: __A = 0 # sample eps ~ N(0, S_noise^2 * I) __A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device ) __A = sigma + gamma * sigma __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_hat + sigma_hat * model_output __A = (sample_hat - pred_original_sample) / sigma_hat __A = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_prev + sigma_prev * model_output __A = (sample_prev - pred_original_sample) / sigma_prev __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ): raise NotImplementedError()
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} SCREAMING_SNAKE_CASE :Tuple = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } SCREAMING_SNAKE_CASE :List[Any] = { 'camembert-base': 512, } SCREAMING_SNAKE_CASE :List[str] = '▁' class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,): # Mask token behave like a normal word, i.e. include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) __A = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __A = len(self.fairseq_tokens_to_ids ) __A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self : Dict ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self : int ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A ) == 0: # Convert sentence piece unk token to fairseq unk token index 
return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : Tuple ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def __getstate__( self : Dict ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Union[str, Any] ,A : Any ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" __A = args.pruning_method __A = args.threshold __A = args.model_name_or_path.rstrip("/" ) __A = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) __A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) ) __A = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: __A = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": __A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = TopKBinarizer.apply(a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = ThresholdBinarizer.apply(a_ , a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A , __A = -0.1, 1.1 __A = torch.sigmoid(a_ ) __A = s * (r - l) + l __A = s_bar.clamp(min=0.0 , max=1.0 ) __A = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: __A = os.path.join( os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' ) if not os.path.isdir(a_ ): shutil.copytree(a_ , a_ ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) SCREAMING_SNAKE_CASE :str = parser.parse_args() main(args)
def heaps(arr: list) -> list:
    """simple docstring"""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
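# Heap's algorithm should emit each of the n! orderings exactly once. A small
# non-interactive sanity check; the sample list is illustrative.
if __name__ == "__main__":
    from itertools import permutations

    assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))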
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE :Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Union[str, Any] = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[Any] = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[Any] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :str = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE :Dict = 
_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def gnome_sort(lst: list) -> list:
    """simple docstring"""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
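# Gnome sort moves a single cursor back and forth: forward while the adjacent
# pair is ordered, one step back after each swap. Worst case O(n^2), O(n) on
# already-sorted input. A tiny non-interactive check with illustrative values:
if __name__ == "__main__":
    assert gnome_sort([5, 3, 8, 1]) == [1, 3, 5, 8]
    assert gnome_sort([]) == []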
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
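# For each a, 2 * a * ((a - 1) // 2) is the closed-form maximum of
# r = ((a - 1)**n + (a + 1)**n) mod a**2 over n >= 1 (the Project Euler 120
# quantity): expanding mod a**2 leaves r = 2na for odd n and r = 2 for even n.
# The brute-force cross-check below is illustrative, not part of the source.
def r_max_brute(a: int) -> int:
    return max(((a - 1) ** n + (a + 1) ** n) % a**2 for n in range(1, 2 * a + 1))


if __name__ == "__main__":
    assert all(r_max_brute(a) == 2 * a * ((a - 1) // 2) for a in range(3, 30))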
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
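# The double loop is a sieve over Euler's totient: phi[n] starts at n and is
# scaled by (1 - 1/p) for each prime p dividing n, so sum(phi[2:]) counts the
# reduced proper fractions with denominator <= limit. Because the sieve works
# in floats, the final int() may truncate by one on some limits; the brute
# cross-check below (with an illustrative helper) therefore allows that slack.
from math import gcd


def phi_brute(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


if __name__ == "__main__":
    expected = sum(phi_brute(n) for n in range(2, 51))
    assert abs(solution(50) - expected) <= 1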
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__) class UpperCAmelCase : '''simple docstring''' snake_case_ = "dummy_data" snake_case_ = "datasets" snake_case_ = False def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,): __A = 0 __A = dataset_name __A = cache_dir __A = use_local_dummy_data __A = config # download_callbacks take a single url as input __A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __A = str(A ) # to be downloaded __A = None __A = None @property def UpperCamelCase_ ( self : Union[str, Any] ): if self._dummy_file is None: __A = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase_ ( self : Optional[Any] ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def UpperCamelCase_ ( self : List[Any] ): return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def UpperCamelCase_ ( self : Tuple ): __A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __A = cached_path( A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A ) return os.path.join(A ,self.dummy_file_name ) @property def UpperCamelCase_ ( self : str ): return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCamelCase_ ( self : Any ): if self._bucket_url is None: __A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def UpperCamelCase_ ( self : Tuple ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ): if self.load_existing_dummy_data: # dummy data is downloaded and tested __A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __A = self.dummy_file_name # special case when data_url is a dict if isinstance(A ,A ): return self.create_dummy_data_dict(A ,A ) elif isinstance(A ,(list, tuple) ): return self.create_dummy_data_list(A ,A ) else: return self.create_dummy_data_single(A ,A ) def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ): return path def UpperCamelCase_ ( self : str ): return {} def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ): __A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(A ,A ): for single_url in single_urls: download_callback(A ) else: __A = single_urls download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(A ,A ): __A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls] else: __A = single_urls __A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) __A = value # make sure that values are unique if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ): __A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url ) __A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __A = [data_url[0]] * len(A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(A ) return dummy_data_list def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ): for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def UpperCamelCase_ ( self : int ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ): def _iter_archive_members(A : Optional[Any] ): # this preserves the order of the members inside the ZIP archive __A = Path(self.dummy_file ).parent __A = path.relative_to(A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(A ) __A = Path(A ) __A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(A ).as_posix(), file_path.open("rb" ) def UpperCamelCase_ ( self : List[Any] ,A : Any ): if not isinstance(A ,A ): __A = [paths] for path in paths: if os.path.isfile(A ): if os.path.basename(A ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(A ): if os.path.basename(A ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(A ): if filename.startswith((".", "__") ): continue yield os.path.join(A ,A )
def binary_xor(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
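# The string construction mirrors Python's ^ operator on non-negative ints,
# after zero-padding both operands to a common width. Illustrative checks:
if __name__ == "__main__":
    assert binary_xor(25, 32) == "0b111001"
    assert binary_xor(25, 32) == bin(25 ^ 32)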
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
def catalan(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
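# The loop applies the Catalan recurrence C(i) = C(i - 1) * (4i - 2) / (i + 1)
# starting from C(0) = 1, so catalan(number) returns the (number - 1)-th
# Catalan number: 1, 1, 2, 5, 14, ... Cross-checked below against the closed
# form C(n) = (2n choose n) / (n + 1); purely illustrative.
if __name__ == "__main__":
    from math import comb

    assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]
    assert all(catalan(n + 1) == comb(2 * n, n) // (n + 1) for n in range(8))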
from typing import Dict, Optional import numpy as np import datasets SCREAMING_SNAKE_CASE :List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' SCREAMING_SNAKE_CASE :List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' SCREAMING_SNAKE_CASE :str = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Tuple: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): __A = new_id # turn into Numpy arrays __A = np.array(a_ ) __A = np.array(a_ ) if reduce_labels: __A = 2_5_5 __A = label - 1 __A = 2_5_5 __A = label != ignore_index __A = np.not_equal(a_ , a_ ) __A = pred_label[mask] __A = np.array(a_ )[mask] __A = pred_label[pred_label == label] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Union[str, Any]: """simple docstring""" __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a_ , a_ ): __A , __A , __A , __A = intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = None , a_ = False , ) -> str: """simple docstring""" __A , __A , __A , __A = total_intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) # compute metrics __A = {} __A = total_area_intersect.sum() / total_area_label.sum() __A = total_area_intersect / total_area_union __A = total_area_intersect / total_area_label __A = np.nanmean(a_ ) __A = np.nanmean(a_ ) __A = all_acc __A = iou __A = acc if nan_to_num is not None: __A = {metric: np.nan_to_num(a_ , nan=a_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) ,reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : bool ,A : Optional[int] = None ,A : Optional[Dict[int, int]] = None ,A : bool = False ,): __A = mean_iou( results=A ,gt_seg_maps=A ,num_labels=A ,ignore_index=A ,nan_to_num=A ,label_map=A ,reduce_labels=A ,) return iou_result
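A minimal numpy sketch of the per-class area bookkeeping done by the first helper above: intersection, prediction, and label areas are accumulated per class id with np.histogram, and IoU falls out of the usual inclusion-exclusion. Toy arrays only; no reduce_labels/ignore_index handling.

import numpy as np

num_labels = 3
pred = np.array([0, 1, 1, 2])
label = np.array([0, 1, 2, 2])

intersect = pred[pred == label]
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred + area_label - area_intersect

iou = area_intersect / area_union
print(iou)  # per-class IoU; np.nanmean(iou) would give the mean IoU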
15
1
from collections.abc import Callable class UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,A : Callable | None = None ): # Stores actual heap items. __A = [] # Stores indexes of each item for supporting updates and deletion. __A = {} # Stores current size of heap. __A = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. __A = key or (lambda A : x) def UpperCamelCase_ ( self : Union[str, Any] ,A : int ): return int((i - 1) / 2 ) if i > 0 else None def UpperCamelCase_ ( self : int ,A : int ): __A = int(2 * i + 1 ) return left if 0 < left < self.size else None def UpperCamelCase_ ( self : Optional[Any] ,A : int ): __A = int(2 * i + 2 ) return right if 0 < right < self.size else None def UpperCamelCase_ ( self : int ,A : int ,A : int ): __A , __A = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. __A , __A = self.arr[j], self.arr[i] def UpperCamelCase_ ( self : Tuple ,A : int ,A : int ): return self.arr[i][1] < self.arr[j][1] def UpperCamelCase_ ( self : Optional[Any] ,A : int ): __A = self._left(A ) __A = self._right(A ) __A = i if left is not None and not self._cmp(A ,A ): __A = left if right is not None and not self._cmp(A ,A ): __A = right return valid_parent def UpperCamelCase_ ( self : Tuple ,A : int ): __A = self._parent(A ) while parent is not None and not self._cmp(A ,A ): self._swap(A ,A ) __A , __A = parent, self._parent(A ) def UpperCamelCase_ ( self : Tuple ,A : int ): __A = self._get_valid_parent(A ) while valid_parent != index: self._swap(A ,A ) __A , __A = valid_parent, self._get_valid_parent(A ) def UpperCamelCase_ ( self : List[Any] ,A : int ,A : int ): if item not in self.pos_map: return __A = self.pos_map[item] __A = [item, self.key(A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(A ) self._heapify_down(A ) def UpperCamelCase_ ( self : str ,A : int ): if item not in self.pos_map: return __A = self.pos_map[item] del self.pos_map[item] __A = self.arr[self.size - 1] __A = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(A ) self._heapify_down(A ) def UpperCamelCase_ ( self : Dict ,A : int ,A : int ): __A = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(A )] ) else: __A = [item, self.key(A )] __A = self.size self.size += 1 self._heapify_up(self.size - 1 ) def UpperCamelCase_ ( self : List[str] ): return self.arr[0] if self.size else None def UpperCamelCase_ ( self : Dict ): __A = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def UpperCAmelCase ( ) -> None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
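The heap class above leans entirely on the implicit array layout of a binary heap; a small standalone sketch of that index arithmetic (helper names are illustrative, not taken from the class):

from __future__ import annotations


def parent(i: int) -> int | None:
    # Parent of the node stored at index i; the root has no parent.
    return (i - 1) // 2 if i > 0 else None


def left(i: int, size: int) -> int | None:
    child = 2 * i + 1
    return child if child < size else None


def right(i: int, size: int) -> int | None:
    child = 2 * i + 2
    return child if child < size else None


assert parent(0) is None
assert (left(0, 7), right(0, 7)) == (1, 2)
assert parent(5) == 2 and parent(6) == 2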
15
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[str] = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE :Optional[Any] = { 'AI-Sweden/gpt-sw3-126m': 2048, 'AI-Sweden/gpt-sw3-350m': 2048, 'AI-Sweden/gpt-sw3-1.6b': 2048, 'AI-Sweden/gpt-sw3-6.7b': 2048, 'AI-Sweden/gpt-sw3-20b': 2048, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,): __A = {} if sp_model_kwargs is None else sp_model_kwargs __A = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) __A = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __A = "<|endoftext|>" if eos_token is None else eos_token __A = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __A = unk_token if pad_token is None else pad_token __A = eos_token if bos_token is None else bos_token else: __A = "<pad>" if pad_token is None else pad_token __A = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = do_lower_case __A = remove_space __A = keep_accents __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) # Used for whitespace normalization in input texts # fmt : off __A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __A = re.compile( f'''[{''.join(map(A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]''' ) def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Optional[Any] ,A : Union[str, Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self : List[str] ): return len(self.sp_model ) def UpperCamelCase_ ( self : int ,A : str ): __A = self.non_printing_characters_re.sub("" ,A ) # Normalize whitespaces __A = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization __A = unicodedata.normalize("NFC" ,A ) return text def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ): __A = self.preprocess_text(A ) return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : int ): return self.sp_model.IdToPiece(A ) @staticmethod def UpperCamelCase_ ( A : str ): return out_string def UpperCamelCase_ ( self : str ,A : List[str] ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string def UpperCamelCase_ ( self : str ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ): if isinstance(A ,A ): __A = self.preprocess_text(A ) __A = self.sp_model.encode(A ) else: __A = [self.preprocess_text(A ) for t in text] __A = self.sp_model.encode(A ) if return_tensors is True or return_tensors == "pt": __A = torch.tensor(A ) return token_ids def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ): return self.sp_model.decode(A ) def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ): __A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __A = ( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=A )
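A sketch of the chat prompt layout built at the end of the tokenizer above, assuming the default special tokens eos="<|endoftext|>" and bos="<s>"; the turn data is invented for illustration.

eos, bos = "<|endoftext|>", "<s>"  # assumed defaults from the tokenizer above
turns = [("User", "Hej!"), ("Bot", "Hej hej!"), ("User", "Vad heter du?")]

all_responses = [f"{speaker}: {text}" for speaker, text in turns]
prompt = f"{eos}{bos}" + f"{bos}".join(all_responses) + f"{bos}Bot:"
print(prompt)
# <|endoftext|><s>User: Hej!<s>Bot: Hej hej!<s>User: Vad heter du?<s>Bot: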
15
1
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) @add_end_docstrings( __SCREAMING_SNAKE_CASE , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self : str ,A : GenericTensor ): if self.framework == "tf": __A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": __A = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=A ) else: raise ValueError("Unsupported framework" ) return masked_index def UpperCamelCase_ ( self : int ,A : GenericTensor ): __A = self.get_masked_index(A ) __A = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" ,self.model.base_model_prefix ,f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' ,) def UpperCamelCase_ ( self : int ,A : GenericTensor ): if isinstance(A ,A ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(A ) def UpperCamelCase_ ( self : List[str] ,A : Optional[Any] ,A : Dict=None ,**A : Dict ): if return_tensors is None: __A = self.framework __A = self.tokenizer(A ,return_tensors=A ) self.ensure_exactly_one_mask_token(A ) return model_inputs def UpperCamelCase_ ( self : Union[str, Any] ,A : int ): __A = self.model(**A ) __A = model_inputs["input_ids"] return model_outputs def UpperCamelCase_ ( self : Any ,A : int ,A : Any=5 ,A : Any=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: __A = target_ids.shape[0] __A = model_outputs["input_ids"][0] __A = model_outputs["logits"] if self.framework == "tf": __A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] __A = outputs.numpy() __A = outputs[0, masked_index, :] __A = stable_softmax(A ,axis=-1 ) if target_ids is not None: __A = tf.gather_nd(tf.squeeze(A ,0 ) ,target_ids.reshape(-1 ,1 ) ) __A = tf.expand_dims(A ,0 ) __A = tf.math.top_k(A ,k=A ) __A , __A = topk.values.numpy(), topk.indices.numpy() else: __A = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=A ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample __A = outputs[0, masked_index, :] __A = logits.softmax(dim=-1 ) if target_ids is not None: __A = probs[..., target_ids] __A , __A = probs.topk(A ) __A = [] __A = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ): __A = [] for v, p in zip(_values ,_predictions ): # Copy is important since we're going to modify this array in place __A = input_ids.numpy().copy() if target_ids is not None: __A = target_ids[p].tolist() __A = p # Filter padding out: __A = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip 
special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __A = self.tokenizer.decode(A ,skip_special_tokens=A ) __A = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(A ) result.append(A ) if single_mask: return result[0] return result def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : Any=None ): if isinstance(A ,A ): __A = [targets] try: __A = self.tokenizer.get_vocab() except Exception: __A = {} __A = [] for target in targets: __A = vocab.get(A ,A ) if id_ is None: __A = self.tokenizer( A ,add_special_tokens=A ,return_attention_mask=A ,return_token_type_ids=A ,max_length=1 ,truncation=A ,)["input_ids"] if len(A ) == 0: logger.warning( f'''The specified target token `{target}` does not exist in the model vocabulary. ''' "We cannot replace it with anything meaningful, ignoring it" ) continue __A = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'''The specified target token `{target}` does not exist in the model vocabulary. ''' f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) __A = list(set(A ) ) if len(A ) == 0: raise ValueError("At least one target must be provided when passed." ) __A = np.array(A ) return target_ids def UpperCamelCase_ ( self : Dict ,A : Union[str, Any]=None ,A : int=None ): __A = {} if targets is not None: __A = self.get_target_ids(A ,A ) __A = target_ids if top_k is not None: __A = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" ,self.model.base_model_prefix ,"The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__( self : Optional[Any] ,A : Optional[int] ,*A : Dict ,**A : Tuple ): __A = super().__call__(A ,**A ) if isinstance(A ,A ) and len(A ) == 1: return outputs[0] return outputs
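A minimal sketch of the PyTorch scoring path above: locate the mask position, softmax the logits at that position, and keep the top-k token ids. The tensors and the mask id (103, BERT's [MASK]) are stand-ins, not real model outputs.

import torch

mask_token_id = 103  # assumption: BERT's [MASK] id
input_ids = torch.tensor([101, 2023, 103, 102])
logits = torch.randn(input_ids.shape[0], 30522)  # (seq_len, vocab_size), random stand-in

masked_index = torch.nonzero(input_ids == mask_token_id, as_tuple=False).squeeze(-1)
probs = logits[masked_index, :].softmax(dim=-1)
values, predictions = probs.topk(5)
print(values.shape, predictions.shape)  # torch.Size([1, 5]) torch.Size([1, 5])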
15
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE :str = 'RegNetConfig' # Base docstring SCREAMING_SNAKE_CASE :List[str] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE :Optional[int] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Any = 'tabby, tabby cat' SCREAMING_SNAKE_CASE :Optional[int] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,): super().__init__(**A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) __A = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : List[Any] ,A : Any ): __A = self.convolution(self.padding(A ) ) __A = self.normalization(A ) __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : RegNetConfig ,**A : str ): super().__init__(**A ) __A = config.num_channels __A = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,) def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ): __A = shape_list(A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __A = tf.transpose(A ,perm=(0, 2, 3, 1) ) __A = self.embedder(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ): super().__init__(**A ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" ) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ): return self.normalization(self.convolution(A ) ,training=A ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict ,A : int ,A : int ,**A : str ): super().__init__(**A ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) __A = [ tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ), tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ), ] def UpperCamelCase_ ( self : Dict ,A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __A = self.pooler(A ) for layer_module in self.attention: __A = layer_module(A ) __A = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : int ,A : Optional[int] ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) __A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict ,A : Any ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ): super().__init__(**A ) __A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __A = [ # downsampling is done in the first layer with stride of 2 layer(A ,A ,A ,stride=A ,name="layers.0" ), *[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Any ,A : List[str] ): for layer_module in self.layers: __A = layer_module(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ): super().__init__(**A ) __A = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) ) __A = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) ) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ): __A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __A = hidden_states + (hidden_state,) __A = stage_module(A ) if output_hidden_states: __A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' snake_case_ = RegNetConfig def __init__( self : int ,A : Optional[int] ,**A : Dict ): super().__init__(**A ) __A = config __A = TFRegNetEmbeddings(A ,name="embedder" ) __A 
= TFRegNetEncoder(A ,name="encoder" ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) @unpack_inputs def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.embedder(A ,training=A ) __A = self.encoder( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = encoder_outputs[0] __A = self.pooler(A ) # Change to NCHW output format have uniformity in the modules __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = RegNetConfig snake_case_ = "regnet" snake_case_ = "pixel_values" @property def UpperCamelCase_ ( self : Optional[Any] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ): super().__init__(A ,*A ,**A ) __A = TFRegNetMainLayer(A ,name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ): super().__init__(A ,*A ,**A ) __A = config.num_labels __A = TFRegNetMainLayer(A ,name="regnet" ) # classification head __A = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = outputs.pooler_output if return_dict else outputs[1] __A = self.classifier[0](A ) __A = self.classifier[1](A ) __A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A ) if not return_dict: __A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
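A small sketch of the layout shuffling the TF port above performs: Keras Conv2D on CPU wants NHWC, while the HF API exposes NCHW, so inputs are transposed in and outputs transposed back with the same perms used in the model code.

import tensorflow as tf

pixel_values = tf.random.normal((2, 3, 224, 224))  # NCHW: (batch, channels, h, w)
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))  # -> (2, 224, 224, 3)
nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))  # back to (2, 3, 224, 224)
assert nchw.shape == pixel_values.shape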
15
1
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values 0, 1 and 2 in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
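A usage sketch for the single-pass sort above: low/mid/high partition the list into settled 0s, settled 1s, an unexamined middle, and settled 2s.

print(dutch_national_flag_sort([2, 0, 1, 2, 0, 1]))  # [0, 0, 1, 1, 2, 2]
print(dutch_national_flag_sort([]))  # []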
15
import math def UpperCAmelCase ( a_ , a_ = 0 , a_ = 0 ) -> list: """simple docstring""" __A = end or len(a_ ) for i in range(a_ , a_ ): __A = i __A = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __A = array[temp_index - 1] temp_index -= 1 __A = temp_index_value return array def UpperCAmelCase ( a_ , a_ , a_ ) -> None: # Max Heap """simple docstring""" __A = index __A = 2 * index + 1 # Left Node __A = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __A = left_index if right_index < heap_size and array[largest] < array[right_index]: __A = right_index if largest != index: __A , __A = array[largest], array[index] heapify(a_ , a_ , a_ ) def UpperCAmelCase ( a_ ) -> list: """simple docstring""" __A = len(a_ ) for i in range(n // 2 , -1 , -1 ): heapify(a_ , a_ , a_ ) for i in range(n - 1 , 0 , -1 ): __A , __A = array[0], array[i] heapify(a_ , 0 , a_ ) return array def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" __A = low __A = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __A , __A = array[j], array[i] i += 1 def UpperCAmelCase ( a_ ) -> list: """simple docstring""" if len(a_ ) == 0: return array __A = 2 * math.ceil(math.loga(len(a_ ) ) ) __A = 1_6 return intro_sort(a_ , 0 , len(a_ ) , a_ , a_ ) def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(a_ ) max_depth -= 1 __A = median_of_a(a_ , a_ , start + ((end - start) // 2) + 1 , end - 1 ) __A = partition(a_ , a_ , a_ , a_ ) intro_sort(a_ , a_ , a_ , a_ , a_ ) __A = p return insertion_sort(a_ , a_ , a_ ) if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE :List[Any] = input('Enter numbers separated by a comma : ').strip() SCREAMING_SNAKE_CASE :str = [float(item) for item in user_input.split(',')] print(sort(unsorted))
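A sketch of the recursion budget used by the introsort above: quicksort is abandoned in favour of heap sort once it has recursed past 2 * ceil(log2(n)) levels, which bounds the worst case at O(n log n).

import math

for n in (10, 1_000, 1_000_000):
    max_depth = 2 * math.ceil(math.log2(n))
    print(n, max_depth)  # 10 -> 8, 1000 -> 20, 1000000 -> 40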
15
1
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def UpperCAmelCase ( a_ ) -> bool: """simple docstring""" __A = int(number**0.5 ) return number == sq * sq def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> tuple[int, int]: """simple docstring""" __A = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __A = x_den * y_den * z_den __A = gcd(a_ , a_ ) top //= hcf bottom //= hcf return top, bottom def UpperCAmelCase ( a_ = 3_5 ) -> int: """simple docstring""" __A = set() __A = 42 __A = Fraction(0 ) __A = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __A = x_num * y_den + x_den * y_num __A = x_den * y_den __A = gcd(a_ , a_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A = add_three( a_ , a_ , a_ , a_ , a_ , a_ ) unique_s.add(a_ ) # n=2 __A = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __A = x_den * x_den * y_den * y_den if is_sq(a_ ) and is_sq(a_ ): __A = int(sqrt(a_ ) ) __A = int(sqrt(a_ ) ) __A = gcd(a_ , a_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A = add_three( a_ , a_ , a_ , a_ , a_ , a_ ) unique_s.add(a_ ) # n=-1 __A = x_num * y_num __A = x_den * y_num + x_num * y_den __A = gcd(a_ , a_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A = add_three( a_ , a_ , a_ , a_ , a_ , a_ ) unique_s.add(a_ ) # n=2 __A = x_num * x_num * y_num * y_num __A = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(a_ ) and is_sq(a_ ): __A = int(sqrt(a_ ) ) __A = int(sqrt(a_ ) ) __A = gcd(a_ , a_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A = add_three( a_ , a_ , a_ , a_ , a_ , a_ ) unique_s.add(a_ ) for num, den in unique_s: total += Fraction(a_ , a_ ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
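A worked example of the n = -1 branch above: 1/x + 1/y = 1/z rearranges to z = x*y / (x + y), which is exactly what the z_num/z_den bookkeeping in that branch encodes.

from fractions import Fraction

x, y = Fraction(1, 2), Fraction(1, 3)
z = (x * y) / (x + y)
assert 1 / x + 1 / y == Fraction(1, 1) / z
print(z)  # 1/5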
15
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml SCREAMING_SNAKE_CASE :Optional[int] = NewType('DataClass', Any) SCREAMING_SNAKE_CASE :int = NewType('DataClassType', Any) def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" if isinstance(a_ , a_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def UpperCAmelCase ( a_ ) -> Callable[[str], Any]: """simple docstring""" __A = {str(a_ ): choice for choice in choices} return lambda a_ : str_to_choice.get(a_ , a_ ) def UpperCAmelCase ( *, a_ = None , a_ = None , a_ = dataclasses.MISSING , a_ = dataclasses.MISSING , a_ = None , **a_ , ) -> dataclasses.Field: """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __A = {} if aliases is not None: __A = aliases if help is not None: __A = help return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_ ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 def __init__( self : Union[str, Any] ,A : Union[DataClassType, Iterable[DataClassType]] ,**A : List[Any] ): # To make the default appear when using --help if "formatter_class" not in kwargs: __A = ArgumentDefaultsHelpFormatter super().__init__(**A ) if dataclasses.is_dataclass(A ): __A = [dataclass_types] __A = list(A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A ) @staticmethod def UpperCamelCase_ ( A : ArgumentParser ,A : dataclasses.Field ): __A = f'''--{field.name}''' __A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) __A = kwargs.pop("aliases" ,[] ) if isinstance(A ,A ): __A = [aliases] __A = getattr(field.type ,"__origin__" ,field.type ) if origin_type is Union or (hasattr(A ,"UnionType" ) and isinstance(A ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
f''' Problem encountered in field \'{field.name}\'.''' ) if type(A ) not in field.type.__args__: # filter `str` in Union __A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __A = getattr(field.type ,"__origin__" ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __A = ( field.type.__args__[0] if isinstance(A ,field.type.__args__[1] ) else field.type.__args__[1] ) __A = getattr(field.type ,"__origin__" ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __A = {} if origin_type is Literal or (isinstance(field.type ,A ) and issubclass(field.type ,A )): if origin_type is Literal: __A = field.type.__args__ else: __A = [x.value for x in field.type] __A = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: __A = field.default else: __A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __A = copy(A ) # Hack because type=bool in argparse does not behave as we want. __A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. __A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __A = default # This tells argparse we accept 0 or 1 value after --field_name __A = "?" # This is the value that will get picked if we do --field_name (without value) __A = True elif isclass(A ) and issubclass(A ,A ): __A = field.type.__args__[0] __A = "+" if field.default_factory is not dataclasses.MISSING: __A = field.default_factory() elif field.default is dataclasses.MISSING: __A = True else: __A = field.type if field.default is not dataclasses.MISSING: __A = field.default elif field.default_factory is not dataclasses.MISSING: __A = field.default_factory() else: __A = True parser.add_argument(A ,*A ,**A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __A = False parser.add_argument(f'''--no_{field.name}''' ,action="store_false" ,dest=field.name ,**A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : DataClassType ): if hasattr(A ,"_argument_group_name" ): __A = self.add_argument_group(dtype._argument_group_name ) else: __A = self try: __A = get_type_hints(A ) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A ): __A = ".".join(map(A ,sys.version_info[:3] ) ) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. 
Try removing ''' "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(A ): if not field.init: continue __A = type_hints[field.name] self._parse_dataclass_field(A ,A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : List[Any]=None ,A : List[Any]=False ,A : Optional[Any]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __A = [] if args_filename: args_files.append(Path(A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __A = ArgumentParser() args_file_parser.add_argument(A ,type=A ,action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) __A , __A = args_file_parser.parse_known_args(args=A ) __A = vars(A ).get(args_file_flag.lstrip("-" ) ,A ) if cmd_args_file_paths: args_files.extend([Path(A ) for p in cmd_args_file_paths] ) __A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __A = file_args + args if args is not None else file_args + sys.argv[1:] __A , __A = self.parse_known_args(args=A ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in vars(A ).items() if k in keys} for k in keys: delattr(A ,A ) __A = dtype(**A ) outputs.append(A ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def UpperCamelCase_ ( self : Dict ,A : Dict[str, Any] ,A : bool = False ): __A = set(args.keys() ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __A = dtype(**A ) outputs.append(A ) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(A )}''' ) return tuple(A ) def UpperCamelCase_ ( self : List[str] ,A : str ,A : bool = False ): with open(Path(A ) ,encoding="utf-8" ) as open_json_file: __A = json.loads(open_json_file.read() ) __A = self.parse_dict(A ,allow_extra_keys=A ) return tuple(A ) def UpperCamelCase_ ( self : int ,A : str ,A : bool = False ): __A = self.parse_dict(yaml.safe_load(Path(A ).read_text() ) ,allow_extra_keys=A ) return tuple(A )
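The parser above mirrors transformers' HfArgumentParser; a usage sketch assuming that API, with an invented dataclass — each field becomes a CLI flag, and bool fields get an implicit store-true form plus a no_* complement.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    # Illustrative fields, not from the source above.
    model_name: str = field(default="bert-base-uncased", metadata={"help": "Checkpoint to fine-tune."})
    lr: float = field(default=3e-5, metadata={"help": "Learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})


parser = HfArgumentParser(TrainArgs)
(args,) = parser.parse_args_into_dataclasses(["--model_name", "roberta-base", "--fp16"])
print(args.model_name, args.lr, args.fp16)  # roberta-base 3e-05 True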
15
1
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): SCREAMING_SNAKE_CASE :Any = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: SCREAMING_SNAKE_CASE :int = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def UpperCAmelCase ( a_ ) -> Optional[Any]: """simple docstring""" __A = (images / 2 + 0.5).clamp(0 , 1 ) __A = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __A = numpy_to_pil(a_ ) return images def UpperCAmelCase ( a_ ) -> int: """simple docstring""" if images.ndim == 3: __A = images[None, ...] __A = (images * 2_5_5).round().astype("uint8" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images __A = [Image.fromarray(image.squeeze() , mode="L" ) for image in images] else: __A = [Image.fromarray(a_ ) for image in images] return pil_images
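A small sketch of the denormalisation performed above: model outputs in [-1, 1] are mapped to [0, 1] with x / 2 + 0.5, then scaled and rounded to uint8 pixels.

import numpy as np

images = np.array([[-1.0, 0.0, 1.0]])
images = np.clip(images / 2 + 0.5, 0, 1)
pixels = (images * 255).round().astype("uint8")
print(pixels)  # [[  0 128 255]]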
15
SCREAMING_SNAKE_CASE :Any = 256 # Modulus to hash a string SCREAMING_SNAKE_CASE :Union[str, Any] = 100_0003 def UpperCAmelCase ( a_ , a_ ) -> bool: """simple docstring""" __A = len(a_ ) __A = len(a_ ) if p_len > t_len: return False __A = 0 __A = 0 __A = 1 # Calculating the hash of pattern and substring of text for i in range(a_ ): __A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus __A = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue __A = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash __A = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: """simple docstring""" __A = "abc1abc12" __A = "alskfjaldsabc1abc1abc12k23adsfabcabc" __A = "alskfjaldsk23adsfabcabc" assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ ) # Test 2) __A = "ABABX" __A = "ABABZABABYABABX" assert rabin_karp(a_ , a_ ) # Test 3) __A = "AAAB" __A = "ABAAAAAB" assert rabin_karp(a_ , a_ ) # Test 4) __A = "abcdabcy" __A = "abcxabcdabxabcdabcdabcy" assert rabin_karp(a_ , a_ ) # Test 5) __A = "Lü" __A = "Lüsai" assert rabin_karp(a_ , a_ ) __A = "Lue" assert not rabin_karp(a_ , a_ ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
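A numeric sketch of the rolling-hash update above: drop the leading character's contribution, shift by the alphabet size, and append the next character, all mod `modulus`. The pow(...) call reproduces the modulus_power the loop above accumulates.

alphabet_size = 256
modulus = 1_000_003

text, p_len = "abcd", 3
h = 0
for ch in text[:p_len]:
    h = (ord(ch) + h * alphabet_size) % modulus
modulus_power = pow(alphabet_size, p_len - 1, modulus)

# Slide the window from "abc" to "bcd".
h = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus

check = 0
for ch in text[1 : p_len + 1]:
    check = (ord(ch) + check * alphabet_size) % modulus
assert h == check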
15
1
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
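The visited-list approach above costs O(n) extra space; a sketch of Floyd's tortoise-and-hare cycle detection as an O(1)-space alternative, reusing the same Node class:

def has_loop_floyd(head: Node) -> bool:
    # Fast pointer advances two links per step; if there is a cycle,
    # it must eventually land on the slow pointer.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False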
15
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel SCREAMING_SNAKE_CASE :Union[str, Any] = False SCREAMING_SNAKE_CASE :Any = True SCREAMING_SNAKE_CASE :Tuple = False if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE :Dict = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } SCREAMING_SNAKE_CASE :Optional[int] = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: SCREAMING_SNAKE_CASE :Dict = reader.read() SCREAMING_SNAKE_CASE :List[str] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel(**config) else: SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel SCREAMING_SNAKE_CASE :List[str] = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) SCREAMING_SNAKE_CASE :List[str] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: SCREAMING_SNAKE_CASE :Optional[Any] = config[key] del config[key] SCREAMING_SNAKE_CASE :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']] SCREAMING_SNAKE_CASE :List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: SCREAMING_SNAKE_CASE :Tuple = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) SCREAMING_SNAKE_CASE :Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue SCREAMING_SNAKE_CASE :List[str] = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: SCREAMING_SNAKE_CASE :List[Any] = param_value SCREAMING_SNAKE_CASE :str = True if not has_changed: SCREAMING_SNAKE_CASE :List[str] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
15
1
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
15
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
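# The pagination arithmetic above is easy to get off by one: the first request
# already returns page 1 (up to 100 jobs), so only ceil((total - 100) / 100)
# extra pages remain, fetched starting from `page=2`. A quick, illustrative
# check of that formula:
import math

for total, expected_extra_pages in [(1, 0), (100, 0), (101, 1), (250, 2)]:
    assert math.ceil((total - 100) / 100) == expected_extra_pages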
15
1
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path SCREAMING_SNAKE_CASE :Any = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def UpperCAmelCase ( a_=True ) -> Optional[Any]: """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__SCREAMING_SNAKE_CASE ) ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = None snake_case_ = None def UpperCamelCase_ ( self : List[Any] ,A : Optional[int] ,A : List[Any] ): with TemporaryDirectory() as tmp_dir: __A = dataset_module_factory(A ,cache_dir=A ) __A = import_main_class(dataset_module.module_path ,dataset=A ) __A = builder_cls( cache_dir=A ,config_name=A ,hash=dataset_module.hash ,) __A = "/".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A ).replace(os.sep ,"/" ), config.DATASET_INFO_FILENAME, ] ) __A = cached_path(A ,cache_dir=A ) self.assertTrue(os.path.exists(A ) ) @pytest.mark.integration def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" __A = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple" __A = dataset_module_factory("wikipedia" , cache_dir=a_ ) __A = import_main_class(dataset_module.module_path ) __A = builder_cls( cache_dir=a_ , config_name="20220301.frr" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam __A = None builder_instance.download_and_prepare() __A = builder_instance.as_dataset() assert ds @pytest.mark.integration def UpperCAmelCase ( a_ ) -> Tuple: """simple docstring""" __A = dataset_module_factory("wikipedia" , cache_dir=a_ ) __A = import_main_class(dataset_module.module_path , dataset=a_ ) __A = builder_cls( cache_dir=a_ , config_name="20220301.frr" , hash=dataset_module.hash , ) __A = builder_instance.as_streaming_dataset() assert ds assert isinstance(a_ , a_ ) assert "train" in ds assert isinstance(ds["train"] , a_ ) assert next(iter(ds["train"] ) )
15
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder where the pruned model will be saved (defaults to `bertarized_<model_name_or_path>`)",
    )

    args = parser.parse_args()

    main(args)
15
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = '▁' SCREAMING_SNAKE_CASE :Dict = {'vocab_file': 'sentencepiece.bpe.model'} SCREAMING_SNAKE_CASE :List[str] = { 'vocab_file': { 'facebook/mbart-large-50-one-to-many-mmt': ( 'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model' ), } } SCREAMING_SNAKE_CASE :Optional[int] = { 'facebook/mbart-large-50-one-to-many-mmt': 1024, } # fmt: off SCREAMING_SNAKE_CASE :Any = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI'] class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = ["input_ids", "attention_mask"] snake_case_ = [] snake_case_ = [] def __init__( self : Union[str, Any] ,A : int ,A : List[Any]=None ,A : List[Any]=None ,A : Optional[Any]="</s>" ,A : int="</s>" ,A : Any="<s>" ,A : Tuple="<unk>" ,A : Dict="<pad>" ,A : Optional[Any]="<mask>" ,A : Optional[Dict[str, Any]] = None ,**A : int ,): # Mask token behave like a normal word, i.e. include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs __A = kwargs.get("additional_special_tokens" ,[] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=A ,tgt_lang=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) __A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __A = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __A = 1 __A = len(self.sp_model ) __A = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A ) } __A = {v: k for k, v in self.lang_code_to_id.items()} __A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __A = src_lang if src_lang is not None else "en_XX" __A = self.lang_code_to_id[self._src_lang] __A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self : Optional[Any] ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase_ ( self : Dict ): return self._src_lang @src_lang.setter def UpperCamelCase_ ( self : Optional[int] ,A : str ): __A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Optional[Any] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : int ,A : Dict ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : List[Any] ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : Dict ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : str ,A : str ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __A = self.sp_model.PieceToId(A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( self : Optional[Any] ,A : int ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self : str ,A : Any ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Optional[Any] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) __A = [1] * len(self.prefix_tokens ) __A = [1] * len(self.suffix_tokens ) if 
token_ids_a is None: return prefix_ones + ([0] * len(A )) + suffix_ones return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones def UpperCamelCase_ ( self : List[Any] ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self : Dict ,A : Any ,A : str ,A : Optional[str] ,A : Optional[str] ,**A : Optional[int] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __A = src_lang __A = self(A ,add_special_tokens=A ,return_tensors=A ,**A ) __A = self.convert_tokens_to_ids(A ) __A = tgt_lang_id return inputs def UpperCamelCase_ ( self : int ,A : List[str] ,A : str = "en_XX" ,A : Optional[List[str]] = None ,A : str = "ro_RO" ,**A : str ,): __A = src_lang __A = tgt_lang return super().prepare_seqaseq_batch(A ,A ,**A ) def UpperCamelCase_ ( self : Tuple ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self : Dict ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self : Optional[Any] ,A : str ): __A = self.lang_code_to_id[src_lang] __A = [self.cur_lang_code_id] __A = [self.eos_token_id] def UpperCamelCase_ ( self : Dict ,A : str ): __A = self.lang_code_to_id[tgt_lang] __A = [self.cur_lang_code_id] __A = [self.eos_token_id]
15
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Union[str, Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } SCREAMING_SNAKE_CASE :int = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = [] def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,): __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def UpperCamelCase_ ( self : List[str] ): return self.sp_model.get_piece_size() def UpperCamelCase_ ( self : Optional[Any] ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : str ,A : Optional[Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Tuple ): return self.sp_model.piece_to_id(A ) def UpperCamelCase_ ( self : List[Any] ,A : Tuple ): __A = self.sp_model.IdToPiece(A ) return token def UpperCamelCase_ ( self : List[Any] ,A : int ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,): __A = kwargs.pop("use_source_tokenizer" ,A ) __A = self.convert_ids_to_tokens(A ,skip_special_tokens=A ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __A = [] __A = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) __A = [] sub_texts.append(A ) else: current_sub_text.append(A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) ) else: __A = "".join(A ) __A = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __A = self.clean_up_tokenization(A ) return clean_text else: return text def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
15
1
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR outputs 1 when both inputs are equal, and 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
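# Sanity check: XNOR is the negation of XOR, so for 1-bit inputs the gate can
# be cross-validated against Python's bitwise `^`. The helper below is an
# illustrative addition, not part of the original file.
def check_against_xor() -> None:
    for a in (0, 1):
        for b in (0, 1):
            assert xnor_gate(a, b) == int(not (a ^ b))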
15
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
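# Minimal usage sketch for the helpers above (numpy only, no pipeline needed).
# `numpy_to_pil` expects float images in [0, 1] laid out as (batch, H, W, C);
# the arrays here are made up purely for illustration.
import numpy as np

rgb_batch = np.random.rand(2, 64, 64, 3)      # two RGB images in [0, 1]
pil_images = numpy_to_pil(rgb_batch)
assert len(pil_images) == 2 and pil_images[0].size == (64, 64)

gray = np.random.rand(64, 64, 1)              # single grayscale image (ndim == 3)
assert numpy_to_pil(gray)[0].mode == "L"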
15
1
import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE :List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE :Optional[int] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE :Optional[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": { "id": datasets.Value("string" ), "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ), }, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) ,codebase_urls=["https://www.atticusprojectai.org/cuad"] ,reference_urls=["https://www.atticusprojectai.org/cuad"] ,) def UpperCamelCase_ ( self : Dict ,A : 
Optional[int] ,A : Dict ): __A = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} __A = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] __A = evaluate(dataset=A ,predictions=A ) return score
15
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[Any] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "yolos" def __init__( self : Any ,A : Optional[Any]=7_68 ,A : Dict=12 ,A : Any=12 ,A : str=30_72 ,A : Any="gelu" ,A : str=0.0 ,A : List[str]=0.0 ,A : Dict=0.02 ,A : int=1E-12 ,A : Tuple=[5_12, 8_64] ,A : List[Any]=16 ,A : str=3 ,A : str=True ,A : Any=1_00 ,A : Dict=True ,A : Dict=False ,A : Tuple=1 ,A : Union[str, Any]=5 ,A : Optional[Any]=2 ,A : Union[str, Any]=5 ,A : int=2 ,A : int=0.1 ,**A : List[str] ,): super().__init__(**A ) __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = initializer_range __A = layer_norm_eps __A = image_size __A = patch_size __A = num_channels __A = qkv_bias __A = num_detection_tokens __A = use_mid_position_embeddings __A = auxiliary_loss # Hungarian matcher __A = class_cost __A = bbox_cost __A = giou_cost # Loss coefficients __A = bbox_loss_coefficient __A = giou_loss_coefficient __A = eos_coefficient class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = version.parse("1.11" ) @property def UpperCamelCase_ ( self : str ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase_ ( self : List[Any] ): return 1E-4 @property def UpperCamelCase_ ( self : Optional[Any] ): return 12
15
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = ["pixel_values"] def __init__( self : Optional[int] ,A : bool = True ,A : Dict[str, int] = None ,A : PILImageResampling = PILImageResampling.BICUBIC ,A : bool = True ,A : Union[int, float] = 1 / 2_55 ,A : bool = True ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,A : bool = True ,**A : str ,): super().__init__(**A ) __A = size if size is not None else {"height": 3_84, "width": 3_84} __A = get_size_dict(A ,default_to_square=A ) __A = do_resize __A = size __A = resample __A = do_rescale __A = rescale_factor __A = do_normalize __A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __A = image_std if image_std is not None else OPENAI_CLIP_STD __A = do_convert_rgb def UpperCamelCase_ ( self : Any ,A : np.ndarray ,A : Dict[str, int] ,A : PILImageResampling = PILImageResampling.BICUBIC ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Optional[Any] ,): __A = get_size_dict(A ,default_to_square=A ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) __A = (size["height"], size["width"]) return resize(A ,size=A ,resample=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Any ,A : np.ndarray ,A : Union[int, float] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Any ,): return rescale(A ,scale=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Any ,A : np.ndarray ,A : Union[float, List[float]] ,A : Union[float, List[float]] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Optional[Any] ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Optional[Any] ,A : ImageInput ,A : Optional[bool] = None ,A : Optional[Dict[str, int]] = None ,A : PILImageResampling = None ,A : Optional[bool] = None ,A : Optional[float] = None ,A : Optional[bool] = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[str, TensorType]] = None ,A : bool = None ,A : ChannelDimension = ChannelDimension.FIRST ,**A : Dict ,): __A = do_resize if do_resize is not None else self.do_resize __A = resample if resample is not None else self.resample __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = do_normalize if do_normalize is not None else self.do_normalize __A = image_mean if image_mean is not None else self.image_mean __A = image_std if image_std is not None else self.image_std __A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __A = size if size is not None else self.size __A = get_size_dict(A ,default_to_square=A ) __A = make_list_of_images(A ) if not valid_images(A ): raise ValueError( "Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: __A = [convert_to_rgb(A ) for image in images] # All transformations expect numpy arrays. __A = [to_numpy_array(A ) for image in images] if do_resize: __A = [self.resize(image=A ,size=A ,resample=A ) for image in images] if do_rescale: __A = [self.rescale(image=A ,scale=A ) for image in images] if do_normalize: __A = [self.normalize(image=A ,mean=A ,std=A ) for image in images] __A = [to_channel_dimension_format(A ,A ) for image in images] __A = BatchFeature(data={"pixel_values": images} ,tensor_type=A ) return encoded_outputs
15
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin' SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json' SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors' SCREAMING_SNAKE_CASE :str = 'tf_model.h5' SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json' SCREAMING_SNAKE_CASE :str = 'model.ckpt' SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack' SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json' SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors' SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json' SCREAMING_SNAKE_CASE :str = 'config.json' SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json' SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json' SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json' SCREAMING_SNAKE_CASE :Optional[int] = '▁' SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility SCREAMING_SNAKE_CASE :str = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" if version.parse(a_ ) < version.parse(a_ ): if "dev" in min_version: __A = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: __A = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
15
1
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation between qubit j and the current target
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # reverse the qubit order with swaps
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
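# For intuition about the expected output: the QFT of the all-zeros state is a
# uniform superposition, so the 10000-shot counts above should be roughly even
# across all 2**n bitstrings. The classical matrix form below (numpy only; the
# function name is ours, added for illustration) makes that easy to verify.
import numpy as np


def qft_matrix(n_qubits: int) -> np.ndarray:
    # N x N DFT matrix with omega = exp(2*pi*i/N), normalized to be unitary.
    n = 2 ** n_qubits
    omega = np.exp(2j * np.pi / n)
    k, l = np.meshgrid(np.arange(n), np.arange(n))
    return omega ** (k * l) / np.sqrt(n)


m = qft_matrix(3)
assert np.allclose(m @ m.conj().T, np.eye(8))   # unitarity
state = m @ np.eye(8)[0]                        # QFT applied to |000>
assert np.allclose(np.abs(state) ** 2, 1 / 8)   # uniform measurement probabilities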
15
def longest_distance(graph) -> None:
    """Find the longest distance in a DAG using Kahn's topological ordering."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
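# Two small practical notes on the routine above: `list.pop(0)` is O(n), so a
# `collections.deque` keeps the traversal linear, and returning the distance is
# usually more useful than printing it. A minimal variant under those
# assumptions (the name `longest_path_length` is ours):
from collections import deque


def longest_path_length(graph: dict) -> int:
    indegree = {u: 0 for u in graph}
    for targets in graph.values():
        for v in targets:
            indegree[v] += 1

    queue = deque(u for u, deg in indegree.items() if deg == 0)
    long_dist = {u: 1 for u in graph}  # every vertex alone is a path of length 1

    while queue:
        u = queue.popleft()
        for v in graph[u]:
            long_dist[v] = max(long_dist[v], long_dist[u] + 1)
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)

    return max(long_dist.values())


# Longest path in the example graph is 0 -> 2 -> 5 -> 6 -> 7, i.e. 5 vertices.
assert longest_path_length(graph) == 5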
15
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
15
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" __A = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) __A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) __A = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) __A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) __A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) __A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) __A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) __A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) __A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) __A = key.replace("image_encoder.module" , "flava.image_model" ) __A = key.replace("text_encoder.module" , "flava.text_model" ) __A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) __A = key.replace("mm_encoder.module" , "flava.multimodal_model" ) __A = key.replace("text_projection" , "flava.text_projection" ) __A = key.replace("image_projection" , "flava.image_projection" ) __A = value.float() for key, value in codebook_state_dict.items(): __A = value return upgrade @torch.no_grad() def UpperCAmelCase ( a_ , a_ , a_ , a_=None ) -> Tuple: """simple docstring""" if config_path is not None: __A = FlavaConfig.from_pretrained(a_ ) else: __A = FlavaConfig() __A = FlavaForPreTraining(a_ ).eval() __A = convert_dalle_checkpoint(a_ , a_ , save_checkpoint=a_ ) if os.path.exists(a_ ): __A = torch.load(a_ , map_location="cpu" ) else: __A = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" ) __A = upgrade_state_dict(a_ , a_ ) hf_model.load_state_dict(a_ ) __A = hf_model.state_dict() __A = count_parameters(a_ ) __A = count_parameters(a_ ) + count_parameters(a_ ) assert torch.allclose(a_ , a_ , atol=1E-3 ) hf_model.save_pretrained(a_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
15
1
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Any = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "mvp" snake_case_ = ["past_key_values"] snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : str ,A : Optional[Any]=5_02_67 ,A : int=10_24 ,A : List[Any]=12 ,A : Any=40_96 ,A : Dict=16 ,A : Any=12 ,A : Optional[int]=40_96 ,A : Optional[int]=16 ,A : List[Any]=0.0 ,A : List[Any]=0.0 ,A : Optional[Any]="gelu" ,A : int=10_24 ,A : int=0.1 ,A : Tuple=0.0 ,A : Optional[Any]=0.0 ,A : Optional[Any]=0.02 ,A : str=0.0 ,A : Any=False ,A : Optional[Any]=True ,A : str=1 ,A : Optional[Any]=0 ,A : Optional[Any]=2 ,A : List[Any]=True ,A : int=2 ,A : str=2 ,A : List[Any]=False ,A : str=1_00 ,A : Any=8_00 ,**A : str ,): __A = vocab_size __A = max_position_embeddings __A = d_model __A = encoder_ffn_dim __A = encoder_layers __A = encoder_attention_heads __A = decoder_ffn_dim __A = decoder_layers __A = decoder_attention_heads __A = dropout __A = attention_dropout __A = activation_dropout __A = activation_function __A = init_std __A = encoder_layerdrop __A = decoder_layerdrop __A = classifier_dropout __A = use_cache __A = encoder_layers __A = scale_embedding # scale factor will be sqrt(d_model) if True __A = use_prompt __A = prompt_length __A = prompt_mid_dim super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,is_encoder_decoder=A ,decoder_start_token_id=A ,forced_eos_token_id=A ,**A ,) if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" ,A ): __A = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' "The config can simply be saved and uploaded again to be fixed." )
15
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} SCREAMING_SNAKE_CASE :Tuple = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } SCREAMING_SNAKE_CASE :List[Any] = { 'camembert-base': 512, } SCREAMING_SNAKE_CASE :List[str] = '▁' class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,): # Mask token behave like a normal word, i.e. include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) __A = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __A = len(self.fairseq_tokens_to_ids ) __A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self : Dict ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self : int ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A ) == 0: # Convert sentence piece unk token to fairseq unk token index 
return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : Tuple ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def __getstate__( self : Dict ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Union[str, Any] ,A : Any ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
15
1
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex SCREAMING_SNAKE_CASE :int = logging.getLogger(__name__) class UpperCAmelCase : '''simple docstring''' def __init__( self : int ): __A = False def UpperCamelCase_ ( self : Optional[Any] ,A : Optional[Any] ,A : str ,A : Optional[Any] ,A : Union[str, Any] ): if not self.initialized: __A = RagRetriever( A ,question_encoder_tokenizer=A ,generator_tokenizer=A ,index=A ,init_retrieval=A ,) __A = True def UpperCamelCase_ ( self : List[Any] ): self.retriever.index.init_index() def UpperCamelCase_ ( self : List[Any] ,A : Optional[int] ,A : Dict ): __A , __A = self.retriever._main_retrieve(A ,A ) return doc_ids, retrieved_doc_embeds class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Tuple ,A : int ,A : Union[str, Any] ,A : List[Any] ,A : Tuple ,A : List[str]=None ): if index is not None and index.is_initialized() and len(A ) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( A ,question_encoder_tokenizer=A ,generator_tokenizer=A ,index=A ,init_retrieval=A ,) __A = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(A ,A ,A ,A ) for worker in self.retrieval_workers ] ) def UpperCamelCase_ ( self : str ): logger.info("initializing retrieval" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ,A : List[Any] ): if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. __A = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )] __A , __A = ray.get(random_worker.retrieve.remote(A ,A ) ) else: __A , __A = self._main_retrieve(A ,A ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A ) @classmethod def UpperCamelCase_ ( cls : str ,A : List[Any] ,A : int=None ,**A : Optional[Any] ): return super(A ,cls ).get_tokenizers(A ,A ,**A ) @classmethod def UpperCamelCase_ ( cls : Any ,A : List[str] ,A : Optional[Any] ,A : List[str]=None ,**A : List[str] ): __A = kwargs.pop("config" ,A ) or RagConfig.from_pretrained(A ,**A ) __A = RagTokenizer.from_pretrained(A ,config=A ) __A = rag_tokenizer.question_encoder __A = rag_tokenizer.generator if indexed_dataset is not None: __A = "custom" __A = CustomHFIndex(config.retrieval_vector_size ,A ) else: __A = cls._build_index(A ) return cls( A ,question_encoder_tokenizer=A ,generator_tokenizer=A ,retrieval_workers=A ,index=A ,)
15
def UpperCAmelCase ( a_ ) -> list:
    """simple docstring"""
    if len(a_ ) <= 1:
        return [tuple(a_ )]

    __A = []

    def generate(a_ , a_ ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , a_ )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                __A , __A = arr[k - 1], arr[i]
            else:  # k is odd
                __A , __A = arr[k - 1], arr[0]
            generate(k - 1 , a_ )

    generate(len(a_ ) , a_ )
    return res


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :int = input('Enter numbers separated by a comma:\n').strip()
    SCREAMING_SNAKE_CASE :Dict = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
15
1
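The style-context column of the row above implements Heap's algorithm for generating permutations, but the `__A`/`a_` renaming hides the swap targets. A minimal readable sketch of the same algorithm, assuming nothing beyond the standard library; the names `heap_permutations`, `generate`, and `result` are illustrative, not taken from the row:

from itertools import permutations

def heap_permutations(arr: list) -> list:
    # Collect every permutation of arr via Heap's algorithm.
    result = []

    def generate(k: int, a: list) -> None:
        if k == 1:
            result.append(tuple(a))
            return
        generate(k - 1, a)
        for i in range(k - 1):
            if k % 2 == 0:
                a[i], a[k - 1] = a[k - 1], a[i]   # k even: swap i-th and last
            else:
                a[0], a[k - 1] = a[k - 1], a[0]   # k odd: swap first and last
            generate(k - 1, a)

    generate(len(arr), list(arr))
    return result

# Sanity check against the standard library.
assert sorted(heap_permutations([1, 2, 3])) == sorted(permutations([1, 2, 3]))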
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } SCREAMING_SNAKE_CASE :Tuple = { 'facebook/bart-base': 1024, 'facebook/bart-large': 1024, 'facebook/bart-large-mnli': 1024, 'facebook/bart-large-cnn': 1024, 'facebook/bart-large-xsum': 1024, 'yjernite/bart_eli5': 1024, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = BartTokenizer def __init__( self : Any ,A : Optional[Any]=None ,A : str=None ,A : Optional[Any]=None ,A : Tuple="replace" ,A : List[Any]="<s>" ,A : Dict="</s>" ,A : str="</s>" ,A : Union[str, Any]="<s>" ,A : List[Any]="<unk>" ,A : str="<pad>" ,A : Union[str, Any]="<mask>" ,A : int=False ,A : Optional[int]=True ,**A : Any ,): super().__init__( A ,A ,tokenizer_file=A ,errors=A ,bos_token=A ,eos_token=A ,sep_token=A ,cls_token=A ,unk_token=A ,pad_token=A ,mask_token=A ,add_prefix_space=A ,trim_offsets=A ,**A ,) __A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" ,A ) != add_prefix_space: 
__A = getattr(A ,pre_tok_state.pop("type" ) ) __A = add_prefix_space __A = pre_tok_class(**A ) __A = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __A = "post_processor" __A = getattr(self.backend_tokenizer ,A ,A ) if tokenizer_component_instance: __A = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __A = tuple(state["sep"] ) if "cls" in state: __A = tuple(state["cls"] ) __A = False if state.get("add_prefix_space" ,A ) != add_prefix_space: __A = add_prefix_space __A = True if state.get("trim_offsets" ,A ) != trim_offsets: __A = trim_offsets __A = True if changes_to_apply: __A = getattr(A ,state.pop("type" ) ) __A = component_class(**A ) setattr(self.backend_tokenizer ,A ,A ) @property def UpperCamelCase_ ( self : Tuple ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCamelCase_ ( self : str ,A : Dict ): __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else value __A = value def UpperCamelCase_ ( self : List[str] ,*A : List[Any] ,**A : List[str] ): __A = kwargs.get("is_split_into_words" ,A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A ,**A ) def UpperCamelCase_ ( self : Optional[Any] ,*A : Tuple ,**A : Any ): __A = kwargs.get("is_split_into_words" ,A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*A ,**A ) def UpperCamelCase_ ( self : Tuple ,A : str ,A : Optional[str] = None ): __A = self._tokenizer.model.save(A ,name=A ) return tuple(A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : Optional[Any] ,A : Union[str, Any]=None ): __A = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
15
def UpperCAmelCase ( a_ ) -> list:
    """simple docstring"""
    if len(a_ ) <= 1:
        return lst
    __A = 1
    while i < len(a_ ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            __A , __A = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                __A = 1
    return lst


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :List[Any] = input('Enter numbers separated by a comma:\n').strip()
    SCREAMING_SNAKE_CASE :List[Any] = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
15
1
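The style-context column of the row above is gnome sort with the swap obscured behind `__A , __A = lst[i], lst[i - 1]`. A readable sketch of the same logic; the name `gnome_sort` matches the row's `__main__` call, everything else is illustrative:

def gnome_sort(lst: list) -> list:
    # Walk an index forward; on an out-of-order pair, swap and step back.
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst

assert gnome_sort([5, 3, 8, 1, 1]) == [1, 1, 3, 5, 8]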
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,A : Tuple ,A : str=13 ,A : List[str]=7 ,A : Dict=True ,A : Optional[Any]=True ,A : Union[str, Any]=False ,A : Optional[int]=True ,A : int=99 ,A : Any=32 ,A : int=5 ,A : Tuple=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : int=0.1 ,A : Optional[Any]=0.1 ,A : int=5_12 ,A : Tuple=16 ,A : Any=2 ,A : int=0.02 ,A : Optional[int]=3 ,A : str=4 ,A : Tuple=None ,): __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope def UpperCamelCase_ ( self : int ): __A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __A = ids_tensor([self.batch_size] ,self.num_choices ) __A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Optional[int] ): return BioGptConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Any ,A : Any ,A : Any ,A : int ,A : List[str] ,A : List[Any] ): __A = BioGptModel(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ) __A = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Any ,A : Tuple ,A : str ,A : Optional[Any] ,A : str ,A : Dict ,A : Dict ,A : Tuple ,A : Union[str, Any] ,A : Tuple ,): __A = BioGptForCausalLM(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def 
UpperCamelCase_ ( self : Union[str, Any] ,A : int ,A : Dict ,A : Optional[Any] ,A : List[Any] ,A : Any ,*A : Optional[Any] ): __A = BioGptModel(config=A ) model.to(A ) model.eval() # create attention mask __A = torch.ones(input_ids.shape ,dtype=torch.long ,device=A ) __A = self.seq_length // 2 __A = 0 # first forward pass __A , __A = model(A ,attention_mask=A ).to_tuple() # create hypothetical next token and extent to next_input_ids __A = ids_tensor((self.batch_size, 1) ,config.vocab_size ) # change a random masked slice from input_ids __A = ids_tensor((1,) ,A ).item() + 1 __A = ids_tensor((self.batch_size, 1) ,config.vocab_size ).squeeze(-1 ) __A = random_other_next_tokens # append to next input_ids and attn_mask __A = torch.cat([input_ids, next_tokens] ,dim=-1 ) __A = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) ,dtype=torch.long ,device=A )] ,dim=1 ,) # get two different outputs __A = model(A ,attention_mask=A )["last_hidden_state"] __A = model(A ,past_key_values=A ,attention_mask=A )["last_hidden_state"] # select random slice __A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() __A = output_from_no_past[:, -1, random_slice_idx].detach() __A = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A ,A ,atol=1E-3 ) ) def UpperCamelCase_ ( self : Dict ,A : List[str] ,A : Dict ,A : Dict ,A : Tuple ,A : Optional[int] ,*A : int ): __A = BioGptModel(config=A ).to(A ).eval() __A = torch.ones(input_ids.shape ,dtype=torch.long ,device=A ) # first forward pass __A = model(A ,attention_mask=A ,use_cache=A ) __A , __A = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) __A = ids_tensor((self.batch_size, 3) ,2 ) # append to next input_ids and __A = torch.cat([input_ids, next_tokens] ,dim=-1 ) __A = torch.cat([attention_mask, next_attn_mask] ,dim=-1 ) __A = model(A ,attention_mask=A )["last_hidden_state"] __A = model(A ,attention_mask=A ,past_key_values=A )[ "last_hidden_state" ] # select random slice __A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() __A = output_from_no_past[:, -3:, random_slice_idx].detach() __A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A ,A ,atol=1E-3 ) ) def UpperCamelCase_ ( self : Any ,A : Union[str, Any] ,A : List[Any] ,A : str ,A : List[Any] ,A : Optional[int] ,*A : str ,A : List[Any]=False ): __A = BioGptForCausalLM(A ) model.to(A ) if gradient_checkpointing: model.gradient_checkpointing_enable() __A = model(A ,labels=A ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCamelCase_ ( self : Tuple ,A : str ,*A : List[Any] ): __A = BioGptModel(A ) __A = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) ,0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) ,0.01 ) def UpperCamelCase_ ( self : Dict ,A : str ,A : int ,A : str ,A : List[Any] ,A : str ,*A : Optional[Any] ): __A = self.num_labels __A = BioGptForTokenClassification(A ) model.to(A ) model.eval() __A = model(A 
,attention_mask=A ,token_type_ids=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : str ): __A = self.prepare_config_and_inputs() ( ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ) = config_and_inputs __A = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case_ = (BioGptForCausalLM,) if is_torch_available() else () snake_case_ = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False def UpperCamelCase_ ( self : int ): __A = BioGptModelTester(self ) __A = ConfigTester(self ,config_class=A ,hidden_size=37 ) def UpperCamelCase_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Union[str, Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self : List[Any] ): __A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __A = type self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A ) def UpperCamelCase_ ( self : Dict ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*A ,gradient_checkpointing=A ) def UpperCamelCase_ ( self : Tuple ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A ) def UpperCamelCase_ ( self : str ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*A ) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*A ) @slow def UpperCamelCase_ ( self : List[Any] ): __A = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(A ) __A = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __A = "left" # Define PAD Token = EOS Token = 50256 __A = tokenizer.eos_token __A = model.config.eos_token_id # use different length sentences to test batching __A = [ "Hello, my dog is a little", "Today, I", ] __A = tokenizer(A ,return_tensors="pt" ,padding=A ) __A = inputs["input_ids"].to(A ) __A = model.generate( input_ids=A ,attention_mask=inputs["attention_mask"].to(A ) ,) __A = tokenizer(sentences[0] ,return_tensors="pt" ).input_ids.to(A ) __A = model.generate(input_ids=A ) __A = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __A = tokenizer(sentences[1] ,return_tensors="pt" ).input_ids.to(A ) __A = model.generate(input_ids=A ,max_length=model.config.max_length - num_paddings ) __A = tokenizer.batch_decode(A ,skip_special_tokens=A ) __A = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=A ) __A = tokenizer.decode(output_padded[0] 
,skip_special_tokens=A ) __A = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(A ,A ) self.assertListEqual(A ,[non_padded_sentence, padded_sentence] ) @slow def UpperCamelCase_ ( self : str ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = BioGptModel.from_pretrained(A ) self.assertIsNotNone(A ) def UpperCamelCase_ ( self : Union[str, Any] ): __A , __A = self.model_tester.prepare_config_and_inputs_for_common() __A = 3 __A = input_dict["input_ids"] __A = input_ids.ne(1 ).to(A ) __A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) __A = BioGptForSequenceClassification(A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,labels=A ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase_ ( self : List[Any] ): __A , __A = self.model_tester.prepare_config_and_inputs_for_common() __A = 3 __A = "multi_label_classification" __A = input_dict["input_ids"] __A = input_ids.ne(1 ).to(A ) __A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) __A = BioGptForSequenceClassification(A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,labels=A ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self : Tuple ): __A = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __A = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) __A = model(A )[0] __A = 4_23_84 __A = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape ,A ) __A = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,A ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : str ): __A = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __A = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(A ) torch.manual_seed(0 ) __A = tokenizer("COVID-19 is" ,return_tensors="pt" ).to(A ) __A = model.generate( **A ,min_length=1_00 ,max_length=10_24 ,num_beams=5 ,early_stopping=A ,) __A = tokenizer.decode(output_ids[0] ,skip_special_tokens=A ) __A = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(A ,A )
15
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = None


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    snake_case_ = 2

    @register_to_config
    def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,):
        # standard deviation of the initial noise distribution
        __A = sigma_max

        # setable values
        __A = None
        __A = None
        __A = None  # sigma(t_i)

    def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ):
        return sample

    def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ):
        __A = num_inference_steps
        __A = np.arange(0 ,self.num_inference_steps )[::-1].copy()
        __A = torch.from_numpy(A ).to(A )
        __A = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __A = torch.tensor(A ,dtype=torch.floataa ,device=A )

    def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ):
        if self.config.s_min <= sigma <= self.config.s_max:
            __A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
        else:
            __A = 0

        # sample eps ~ N(0, S_noise^2 * I)
        __A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device )
        __A = sigma + gamma * sigma
        __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,):
        __A = sample_hat + sigma_hat * model_output
        __A = (sample_hat - pred_original_sample) / sigma_hat
        __A = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A ,derivative=A ,pred_original_sample=A )

    def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,):
        __A = sample_prev + sigma_prev * model_output
        __A = (sample_prev - pred_original_sample) / sigma_prev
        __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A ,derivative=A ,pred_original_sample=A )

    def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ):
        raise NotImplementedError()
15
1
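The KarrasVe-style scheduler in the style-context column above builds its noise schedule as a geometric interpolation. A small numpy sketch of that single expression, mirroring the row's `sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1))`; the function name is illustrative and the endpoint check uses the row's default sigma values:

import numpy as np

def karras_ve_schedule(sigma_min: float, sigma_max: float, num_steps: int) -> np.ndarray:
    # Geometric interpolation from sigma_max**2 (i = 0) down to sigma_min**2 (i = N - 1).
    i = np.arange(num_steps)
    return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))

sched = karras_ve_schedule(0.02, 100.0, 50)
assert np.isclose(sched[0], 100.0**2) and np.isclose(sched[-1], 0.02**2)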
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


SCREAMING_SNAKE_CASE :List[Any] = False


class UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self : Optional[Any] ,A : Optional[Any]=32 ):
        set_seed(0 )
        __A = UNetaDModel(sample_size=A ,in_channels=3 ,out_channels=3 )
        __A = torch.optim.SGD(model.parameters() ,lr=0.00_01 )
        return model, optimizer

    @slow
    def UpperCamelCase_ ( self : Dict ):
        __A = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        __A = DDPMScheduler(
            num_train_timesteps=10_00 ,beta_start=0.00_01 ,beta_end=0.02 ,beta_schedule="linear" ,clip_sample=A ,)
        __A = DDIMScheduler(
            num_train_timesteps=10_00 ,beta_start=0.00_01 ,beta_end=0.02 ,beta_schedule="linear" ,clip_sample=A ,)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0 )
        __A = [torch.randn((4, 3, 32, 32) ).clip(-1 ,1 ).to(A ) for _ in range(4 )]
        __A = [torch.randn((4, 3, 32, 32) ).to(A ) for _ in range(4 )]
        __A = [torch.randint(0 ,10_00 ,(4,) ).long().to(A ) for _ in range(4 )]

        # train with a DDPM scheduler
        __A , __A = self.get_model_optimizer(resolution=32 )
        model.train().to(A )
        for i in range(4 ):
            optimizer.zero_grad()
            __A = ddpm_scheduler.add_noise(clean_images[i] ,noise[i] ,timesteps[i] )
            __A = model(A ,timesteps[i] ).sample
            __A = torch.nn.functional.mse_loss(A ,noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        __A , __A = self.get_model_optimizer(resolution=32 )
        model.train().to(A )
        for i in range(4 ):
            optimizer.zero_grad()
            __A = ddim_scheduler.add_noise(clean_images[i] ,noise[i] ,timesteps[i] )
            __A = model(A ,timesteps[i] ).sample
            __A = torch.nn.functional.mse_loss(A ,noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(A ,A ,atol=1E-5 ) )
        self.assertTrue(torch.allclose(A ,A ,atol=1E-5 ) )
15
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__) class UpperCAmelCase : '''simple docstring''' snake_case_ = "dummy_data" snake_case_ = "datasets" snake_case_ = False def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,): __A = 0 __A = dataset_name __A = cache_dir __A = use_local_dummy_data __A = config # download_callbacks take a single url as input __A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __A = str(A ) # to be downloaded __A = None __A = None @property def UpperCamelCase_ ( self : Union[str, Any] ): if self._dummy_file is None: __A = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase_ ( self : Optional[Any] ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def UpperCamelCase_ ( self : List[Any] ): return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def UpperCamelCase_ ( self : Tuple ): __A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __A = cached_path( A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A ) return os.path.join(A ,self.dummy_file_name ) @property def UpperCamelCase_ ( self : str ): return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCamelCase_ ( self : Any ): if self._bucket_url is None: __A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def UpperCamelCase_ ( self : Tuple ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ): if self.load_existing_dummy_data: # dummy data is downloaded and tested __A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __A = self.dummy_file_name # special case when data_url is a dict if isinstance(A ,A ): return self.create_dummy_data_dict(A ,A ) elif isinstance(A ,(list, tuple) ): return self.create_dummy_data_list(A ,A ) else: return self.create_dummy_data_single(A ,A ) def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ): return path def UpperCamelCase_ ( self : str ): return {} def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ): __A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(A ,A ): for single_url in single_urls: download_callback(A ) else: __A = single_urls download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(A ,A ): __A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls] else: __A = single_urls __A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) __A = value # make sure that values are unique if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ): __A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url ) __A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __A = [data_url[0]] * len(A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(A ) return dummy_data_list def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ): for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def UpperCamelCase_ ( self : int ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ): def _iter_archive_members(A : Optional[Any] ): # this preserves the order of the members inside the ZIP archive __A = Path(self.dummy_file ).parent __A = path.relative_to(A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(A ) __A = Path(A ) __A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(A ).as_posix(), file_path.open("rb" ) def UpperCamelCase_ ( self : List[Any] ,A : Any ): if not isinstance(A ,A ): __A = [paths] for path in paths: if os.path.isfile(A ): if os.path.basename(A ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(A ): if os.path.basename(A ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(A ): if filename.startswith((".", "__") ): continue yield os.path.join(A ,A )
15
1
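The determinism test in the first code column of the row above leans on `scheduler.add_noise`, which for DDPM-style schedulers applies the closed-form forward diffusion process. A hedged sketch of that formula with an illustrative helper name; `alpha_bar_t` stands for the cumulative product of the alphas at step t:

import torch

def add_noise(x0: torch.Tensor, eps: torch.Tensor, alpha_bar_t: torch.Tensor) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    return alpha_bar_t.sqrt() * x0 + (1.0 - alpha_bar_t).sqrt() * eps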
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter SCREAMING_SNAKE_CASE :Union[str, Any] = 'Create a default config file for Accelerate with only a few flags set.' def UpperCAmelCase ( a_="no" , a_ = default_json_config_file , a_ = False ) -> Optional[int]: """simple docstring""" __A = Path(a_ ) path.parent.mkdir(parents=a_ , exist_ok=a_ ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False __A = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) __A = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): __A = torch.cuda.device_count() __A = num_gpus __A = False if num_gpus > 1: __A = "MULTI_GPU" else: __A = "NO" elif is_xpu_available() and use_xpu: __A = torch.xpu.device_count() __A = num_xpus __A = False if num_xpus > 1: __A = "MULTI_XPU" else: __A = "NO" elif is_npu_available(): __A = torch.npu.device_count() __A = num_npus __A = False if num_npus > 1: __A = "MULTI_NPU" else: __A = "NO" else: __A = 0 __A = True __A = 1 __A = "NO" __A = ClusterConfig(**a_ ) config.to_json_file(a_ ) return path def UpperCAmelCase ( a_ , a_ ) -> List[Any]: """simple docstring""" __A = parser.add_parser("default" , parents=a_ , help=a_ , formatter_class=a_ ) parser.add_argument( "--config_file" , default=a_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=a_ , help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=a_ ) return parser def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" __A = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
15
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


SCREAMING_SNAKE_CASE :List[Any] = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE :List[str] = ['BartphoTokenizer']


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    SCREAMING_SNAKE_CASE :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
15
1
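The `__init__` module in the style-context column above defers heavy imports through `_LazyModule`. A minimal sketch of the underlying idea using a PEP 562 module-level `__getattr__`; this illustrates the pattern only, it is not the transformers implementation:

import importlib

_import_structure = {"tokenization_bartpho": ["BartphoTokenizer"]}

def __getattr__(name: str):
    # Resolve attribute -> submodule lazily on first access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")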
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : int ): __A = logging.get_logger() # the current default level is logging.WARNING __A = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() ) # restore to the original level logging.set_verbosity(A ) def UpperCamelCase_ ( self : Dict ): __A = logging.get_verbosity() __A = logging.get_logger("transformers.models.bart.tokenization_bart" ) __A = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(A ) as cl: logger.warning(A ) self.assertEqual(cl.out ,msg + "\n" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(A ) as cl: logger.warning(A ) self.assertEqual(cl.out ,"" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(A ) as cl: logger.warning(A ) self.assertEqual(cl.out ,msg + "\n" ) # restore to the original level logging.set_verbosity(A ) @mockenv(TRANSFORMERS_VERBOSITY="error" ) def UpperCamelCase_ ( self : Optional[Any] ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __A = logging.get_logger("transformers.models.bart.tokenization_bart" ) __A = os.getenv("TRANSFORMERS_VERBOSITY" ,A ) __A = logging.log_levels[env_level_str] __A = logging.get_verbosity() self.assertEqual( A ,A ,f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' ,) # restore to the original level __A = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error" ) def UpperCamelCase_ ( self : Tuple ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __A = logging.logging.getLogger() with CaptureLogger(A ) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart" ) self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" ,cl.out ) # no need to restore as nothing was changed def UpperCamelCase_ ( self : int ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __A = logging.get_logger("transformers.models.bart.tokenization_bart" ) __A = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ): # nothing should be logged as env var disables this method with CaptureLogger(A ) as cl: logger.warning_advice(A ) self.assertEqual(cl.out ,"" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with 
CaptureLogger(A ) as cl: logger.warning_advice(A ) self.assertEqual(cl.out ,msg + "\n" ) def UpperCAmelCase ( ) -> List[str]: """simple docstring""" disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
15
from typing import Dict, Optional import numpy as np import datasets SCREAMING_SNAKE_CASE :List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' SCREAMING_SNAKE_CASE :List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' SCREAMING_SNAKE_CASE :str = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Tuple: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): __A = new_id # turn into Numpy arrays __A = np.array(a_ ) __A = np.array(a_ ) if reduce_labels: __A = 2_5_5 __A = label - 1 __A = 2_5_5 __A = label != ignore_index __A = np.not_equal(a_ , a_ ) __A = pred_label[mask] __A = np.array(a_ )[mask] __A = pred_label[pred_label == label] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Union[str, Any]: """simple docstring""" __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a_ , a_ ): __A , __A , __A , __A = intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = None , a_ = False , ) -> str: """simple docstring""" __A , __A , __A , __A = total_intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) # compute metrics __A = {} __A = total_area_intersect.sum() / total_area_label.sum() __A = total_area_intersect / total_area_union __A = total_area_intersect / total_area_label __A = np.nanmean(a_ ) __A = np.nanmean(a_ ) __A = all_acc __A = iou __A = acc if nan_to_num is not None: __A = {metric: np.nan_to_num(a_ , nan=a_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) ,reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : bool ,A : Optional[int] = None ,A : Optional[Dict[int, int]] = None ,A : bool = False ,): __A = mean_iou( results=A ,gt_seg_maps=A ,num_labels=A ,ignore_index=A ,nan_to_num=A ,label_map=A ,reduce_labels=A ,) return iou_result
15
1
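The metric in the second code column of the row above accumulates per-class intersection and union histograms before averaging. The core quantity reduces to a single ratio; a minimal binary-mask sketch with illustrative names:

import numpy as np

def binary_iou(pred: np.ndarray, target: np.ndarray) -> float:
    # IoU = |pred AND target| / |pred OR target|
    intersection = np.logical_and(pred, target).sum()
    union = np.logical_or(pred, target).sum()
    return float(intersection) / float(union) if union else float("nan")

assert binary_iou(np.array([1, 1, 0], bool), np.array([1, 0, 0], bool)) == 0.5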
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = ["pixel_values"] def __init__( self : List[str] ,A : bool = True ,A : Dict[str, int] = None ,A : PILImageResampling = PIL.Image.BICUBIC ,A : bool = True ,A : Dict[str, int] = None ,A : Union[int, float] = 1 / 2_55 ,A : bool = True ,A : bool = True ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,**A : Optional[int] ,): super().__init__(**A ) __A = size if size is not None else {"height": 2_56, "width": 2_56} __A = get_size_dict(A ) __A = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} __A = get_size_dict(A ,param_name="crop_size" ) __A = do_resize __A = size __A = resample __A = do_center_crop __A = crop_size __A = do_rescale __A = rescale_factor __A = do_normalize __A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __A = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase_ ( self : Optional[Any] ,A : np.ndarray ,A : Dict[str, int] ,A : PILImageResampling = PIL.Image.BICUBIC ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Optional[Any] ,): __A = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return resize( A ,size=(size["height"], size["width"]) ,resample=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Optional[int] ,A : np.ndarray ,A : Dict[str, int] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Optional[int] ,): __A = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(A ,size=(size["height"], size["width"]) ,data_format=A ,**A ) def UpperCamelCase_ ( self : int ,A : np.ndarray ,A : Union[int, float] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Any ,): return rescale(A ,scale=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Tuple ,A : np.ndarray ,A : Union[float, List[float]] ,A : Union[float, List[float]] ,A : Optional[Union[str, ChannelDimension]] = None ,**A : Union[str, Any] ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def UpperCamelCase_ ( self : Tuple ,A : ImageInput ,A : bool = None ,A : Dict[str, int] = None ,A : Optional[Any]=None ,A : bool = None ,A : Dict[str, int] = None ,A : bool = None ,A : float = None ,A : bool = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[float, List[float]]] = None ,A : Optional[Union[str, TensorType]] = None ,A : ChannelDimension = ChannelDimension.FIRST ,**A : Optional[Any] ,): __A = do_resize if do_resize is not None else self.do_resize __A = resample if resample is not None else self.resample __A = do_center_crop if do_center_crop is not None else self.do_center_crop __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = do_normalize if do_normalize is not None else self.do_normalize __A = image_mean if image_mean is not None else self.image_mean __A = image_std if image_std is not None else self.image_std __A = size if size is not None else self.size __A = get_size_dict(A ) __A = crop_size if crop_size is not None else self.crop_size __A = get_size_dict(A ,param_name="crop_size" ) __A = make_list_of_images(A ) if not valid_images(A ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __A = [to_numpy_array(A ) for image in images] if do_resize: __A = [self.resize(image=A ,size=A ,resample=A ) for image in images] if do_center_crop: __A = [self.center_crop(image=A ,size=A ) for image in images] if do_rescale: __A = [self.rescale(image=A ,scale=A ) for image in images] if do_normalize: __A = [self.normalize(image=A ,mean=A ,std=A ) for image in images] __A = [to_channel_dimension_format(A ,A ) for image in images] __A = {"pixel_values": images} return BatchFeature(data=A ,tensor_type=A )
15
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[str] = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE :Optional[Any] = { 'AI-Sweden/gpt-sw3-126m': 2048, 'AI-Sweden/gpt-sw3-350m': 2048, 'AI-Sweden/gpt-sw3-1.6b': 2048, 'AI-Sweden/gpt-sw3-6.7b': 2048, 'AI-Sweden/gpt-sw3-20b': 2048, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,): __A = {} if sp_model_kwargs is None else sp_model_kwargs __A = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) __A = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __A = "<|endoftext|>" if eos_token is None else eos_token __A = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __A = unk_token if pad_token is None else pad_token __A = eos_token if bos_token is None else bos_token else: __A = "<pad>" if pad_token is None else pad_token __A = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = do_lower_case __A = remove_space __A = keep_accents __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) # Used for whitespace normalization in input texts # fmt : off __A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __A = re.compile( f'''[{''.join(map(A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]''' ) def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Optional[Any] ,A : Union[str, Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self : List[str] ): return len(self.sp_model ) def UpperCamelCase_ ( self : int ,A : str ): __A = self.non_printing_characters_re.sub("" ,A ) # Normalize whitespaces __A = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization __A = unicodedata.normalize("NFC" ,A ) return text def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ): __A = self.preprocess_text(A ) return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : int ): return self.sp_model.IdToPiece(A ) @staticmethod def UpperCamelCase_ ( A : str ): return out_string def UpperCamelCase_ ( self : str ,A : List[str] ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string def UpperCamelCase_ ( self : str ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ): if isinstance(A ,A ): __A = self.preprocess_text(A ) __A = self.sp_model.encode(A ) else: __A = [self.preprocess_text(A ) for t in text] __A = self.sp_model.encode(A ) if return_tensors is True or return_tensors == "pt": __A = torch.tensor(A ) return token_ids def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ): return self.sp_model.decode(A ) def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ): __A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __A = ( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=A )
15
1
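A note on the tokenizer sample above: its preprocessing strips non-printing characters, folds assorted Unicode space variants into a plain space, and finishes with NFC normalization. The sketch below isolates that step under stated assumptions: the whitespace set is an illustrative subset, not the model's exact list, and the control-character ranges mirror the regex built in the sample.

import re
import unicodedata

# Assumed subset of the Unicode space variants folded into a plain " ".
WHITESPACE_VARIANTS = {"\u00a0", "\u2009", "\u200a", "\u202f", "\u3000"}
# Control characters stripped before tokenization (ranges taken from the sample's regex).
NON_PRINTING_RE = re.compile(
    "[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)))) + "]"
)


def preprocess_text(text: str) -> str:
    text = NON_PRINTING_RE.sub("", text)  # drop control characters
    text = "".join(c if c not in WHITESPACE_VARIANTS else " " for c in text)
    return unicodedata.normalize("NFC", text)  # canonical composition


print(preprocess_text("Hej\u00a0v\u00e4rlden\x07"))  # -> "Hej världen"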
import inspect import unittest class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): try: import diffusers # noqa: F401 except ImportError: assert False def UpperCamelCase_ ( self : str ): import diffusers from diffusers.dependency_versions_table import deps __A = inspect.getmembers(A ,inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": __A = "k-diffusion" elif backend == "invisible_watermark": __A = "invisible-watermark" assert backend in deps, f'''{backend} is not in the deps table!'''
15
import numpy as np def UpperCAmelCase ( a_ , a_ , a_ = 1E-12 , a_ = 1_0_0 , ) -> tuple[float, np.ndarray]: """simple docstring""" assert np.shape(a_ )[0] == np.shape(a_ )[1] # Ensure proper dimensionality. assert np.shape(a_ )[0] == np.shape(a_ )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(a_ ) == np.iscomplexobj(a_ ) __A = np.iscomplexobj(a_ ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(a_ , input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. __A = False __A = 0 __A = 0 __A = 1E12 while not convergence: # Multiple matrix by the vector. __A = np.dot(a_ , a_ ) # Normalize the resulting output vector. __A = w / np.linalg.norm(a_ ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) __A = vector.conj().T if is_complex else vector.T __A = np.dot(a_ , np.dot(a_ , a_ ) ) # Check convergence. __A = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: __A = True __A = lambda_ if is_complex: __A = np.real(lambda_ ) return lambda_, vector def UpperCAmelCase ( ) -> None: """simple docstring""" __A = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] ) __A = np.array([4_1, 4, 2_0] ) __A = real_input_matrix.astype(np.complexaaa ) __A = np.triu(1J * complex_input_matrix , 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T __A = np.array([4_1, 4, 2_0] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": __A = real_input_matrix __A = real_vector elif problem_type == "complex": __A = complex_input_matrix __A = complex_vector # Our implementation. __A , __A = power_iteration(a_ , a_ ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). __A , __A = np.linalg.eigh(a_ ) # Last eigenvalue is the maximum one. __A = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. __A = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(a_ ) - np.abs(a_ ) ) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
15
1
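The power-iteration routine above estimates the dominant eigenpair by repeated multiplication and renormalization. A quick sanity check against numpy's eigh, with illustrative names; the fixed iteration count stands in for the sample's convergence test.

import numpy as np


def dominant_eigenpair(a: np.ndarray, iters: int = 500):
    # Repeatedly multiply and renormalize; the Rayleigh quotient of the
    # iterate converges to the eigenvalue of largest magnitude.
    v = np.random.default_rng(0).standard_normal(a.shape[0])
    for _ in range(iters):
        v = a @ v
        v /= np.linalg.norm(v)
    return v @ a @ v, v


a = np.array([[41.0, 4.0, 20.0], [4.0, 26.0, 30.0], [20.0, 30.0, 50.0]])
lam, v = dominant_eigenpair(a)
ref = np.linalg.eigh(a)[0][-1]  # eigh returns eigenvalues in ascending order
assert abs(lam - ref) < 1e-6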
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE :Optional[int] = 16 SCREAMING_SNAKE_CASE :int = 32 def UpperCAmelCase ( a_ , a_ = 1_6 ) -> str: """simple docstring""" __A = AutoTokenizer.from_pretrained("bert-base-cased" ) __A = load_dataset("glue" , "mrpc" ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __A = datasets.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __A = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __A = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __A = 1_6 elif accelerator.mixed_precision != "no": __A = 8 else: __A = None return tokenizer.pad( a_ , padding="longest" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__A = DataLoader( tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) __A = DataLoader( tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders SCREAMING_SNAKE_CASE :int = mocked_dataloaders # noqa: F811 def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1": __A = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: __A = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: __A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __A = config["lr"] __A = int(config["num_epochs"] ) __A = int(config["seed"] ) __A = int(config["batch_size"] ) set_seed(a_ ) __A , __A = get_dataloaders(a_ , a_ ) __A = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation __A = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __A = batch_size // MAX_GPU_BATCH_SIZE __A = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) __A = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __A = model.to(accelerator.device ) # Instantiate optimizer __A = AdamW(params=model.parameters() , lr=a_ ) # Instantiate scheduler __A = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=1_0_0 , num_training_steps=(len(a_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __A , __A , __A , __A , __A = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: __A = os.path.split(a_ )[-1].split("." )[0] accelerator.init_trackers(a_ , a_ ) # Now we train the model for epoch in range(a_ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: __A = 0 for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __A = model(**a_ ) __A = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() __A = loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): __A = model(**a_ ) __A = outputs.logits.argmax(dim=-1 ) __A , __A = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=a_ , references=a_ , ) __A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , a_ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(a_ ), "epoch": epoch, } , step=a_ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def UpperCAmelCase ( ) -> Tuple: """simple docstring""" __A = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=a_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) __A = parser.parse_args() __A = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6} training_function(a_ , a_ ) if __name__ == "__main__": main()
15
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE :str = 'RegNetConfig' # Base docstring SCREAMING_SNAKE_CASE :List[str] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE :Optional[int] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Any = 'tabby, tabby cat' SCREAMING_SNAKE_CASE :Optional[int] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,): super().__init__(**A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) __A = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : List[Any] ,A : Any ): __A = self.convolution(self.padding(A ) ) __A = self.normalization(A ) __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : RegNetConfig ,**A : str ): super().__init__(**A ) __A = config.num_channels __A = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,) def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ): __A = shape_list(A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __A = tf.transpose(A ,perm=(0, 2, 3, 1) ) __A = self.embedder(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ): super().__init__(**A ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" ) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ): return self.normalization(self.convolution(A ) ,training=A ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict ,A : int ,A : int ,**A : str ): super().__init__(**A ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) __A = [ tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ), tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ), ] def UpperCamelCase_ ( self : Dict ,A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __A = self.pooler(A ) for layer_module in self.attention: __A = layer_module(A ) __A = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : int ,A : Optional[int] ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) __A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict ,A : Any ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ): super().__init__(**A ) __A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __A = [ # downsampling is done in the first layer with stride of 2 layer(A ,A ,A ,stride=A ,name="layers.0" ), *[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Any ,A : List[str] ): for layer_module in self.layers: __A = layer_module(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ): super().__init__(**A ) __A = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) ) __A = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) ) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ): __A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __A = hidden_states + (hidden_state,) __A = stage_module(A ) if output_hidden_states: __A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' snake_case_ = RegNetConfig def __init__( self : int ,A : Optional[int] ,**A : Dict ): super().__init__(**A ) __A = config __A = TFRegNetEmbeddings(A ,name="embedder" ) __A 
= TFRegNetEncoder(A ,name="encoder" ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) @unpack_inputs def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.embedder(A ,training=A ) __A = self.encoder( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = encoder_outputs[0] __A = self.pooler(A ) # Change to NCHW output format have uniformity in the modules __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = RegNetConfig snake_case_ = "regnet" snake_case_ = "pixel_values" @property def UpperCamelCase_ ( self : Optional[Any] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ): super().__init__(A ,*A ,**A ) __A = TFRegNetMainLayer(A ,name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ): super().__init__(A ,*A ,**A ) __A = config.num_labels __A = TFRegNetMainLayer(A ,name="regnet" ) # classification head __A = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = outputs.pooler_output if return_dict else outputs[1] __A = self.classifier[0](A ) __A = self.classifier[1](A ) __A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A ) if not return_dict: __A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
15
1
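The Accelerate training script above threads experiment tracking through three calls; stripped of the training loop, the pattern reduces to the sketch below. The project name, config, and logged loss value are placeholders.

from accelerate import Accelerator

accelerator = Accelerator(log_with="all")  # pick up every tracker available in the environment
accelerator.init_trackers("mrpc_example", config={"lr": 2e-5, "num_epochs": 3})

for epoch in range(3):
    # ... forward/backward/optimizer steps elided ...
    accelerator.log({"train_loss": 0.0, "epoch": epoch}, step=epoch)

accelerator.end_training()  # flush and close all open trackers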
from __future__ import annotations

from math import pi


def UpperCAmelCase(inductance, frequency, reactance) -> dict[str, float]:
    """simple docstring"""
    # Solves X_L = 2 * pi * f * L for whichever argument is given as 0.
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import math


def insertion_sort(array, start=0, end=0) -> list:
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array) -> list:
    """simple docstring"""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_a(array, first_index, middle_index, last_index) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array) -> list:
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 1_6
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
15
1
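Within the intro-sort sample, the median-of-three pivot choice is the piece that guards quicksort against already-sorted input; isolated below with descriptive names.

def median_of_three(array: list, first: int, middle: int, last: int):
    # The median of three sampled elements is returned as the pivot; a single
    # extreme sample can no longer force quadratic partitioning.
    x, y, z = array[first], array[middle], array[last]
    if (x > y) != (x > z):
        return x
    if (y > x) != (y > z):
        return y
    return z


print(median_of_three([9, 1, 5, 3, 7], 0, 2, 4))  # samples 9, 5, 7 -> 7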
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = LDMTextToImagePipeline snake_case_ = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } snake_case_ = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS snake_case_ = False def UpperCamelCase_ ( self : Optional[int] ): torch.manual_seed(0 ) __A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) __A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="scaled_linear" ,clip_sample=A ,set_alpha_to_one=A ,) torch.manual_seed(0 ) __A = AutoencoderKL( block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") ,up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") ,latent_channels=4 ,) torch.manual_seed(0 ) __A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) __A = CLIPTextModel(A ) __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __A = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase_ ( self : Any ,A : Any ,A : List[Any]=0 ): if str(A ).startswith("mps" ): __A = torch.manual_seed(A ) else: __A = torch.Generator(device=A ).manual_seed(A ) __A = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self : Dict ): __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = LDMTextToImagePipeline(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) __A = self.get_dummy_inputs(A ) __A = pipe(**A ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __A = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : str ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : str ,A : Dict ,A : int=torch.floataa ,A : List[Any]=0 ): __A = torch.manual_seed(A ) __A = np.random.RandomState(A ).standard_normal((1, 4, 32, 32) ) __A = torch.from_numpy(A ).to(device=A ,dtype=A ) __A = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, 
"output_type": "numpy", } return inputs def UpperCamelCase_ ( self : Optional[int] ): __A = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(A ) pipe.set_progress_bar_config(disable=A ) __A = self.get_inputs(A ) __A = pipe(**A ).images __A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 2_56, 2_56, 3) __A = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] ) __A = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Any=torch.floataa ,A : Dict=0 ): __A = torch.manual_seed(A ) __A = np.random.RandomState(A ).standard_normal((1, 4, 32, 32) ) __A = torch.from_numpy(A ).to(device=A ,dtype=A ) __A = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self : Dict ): __A = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(A ) pipe.set_progress_bar_config(disable=A ) __A = self.get_inputs(A ) __A = pipe(**A ).images[0] __A = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) __A = np.abs(expected_image - image ).max() assert max_diff < 1E-3
15
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml SCREAMING_SNAKE_CASE :Optional[int] = NewType('DataClass', Any) SCREAMING_SNAKE_CASE :int = NewType('DataClassType', Any) def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" if isinstance(a_ , a_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def UpperCAmelCase ( a_ ) -> Callable[[str], Any]: """simple docstring""" __A = {str(a_ ): choice for choice in choices} return lambda a_ : str_to_choice.get(a_ , a_ ) def UpperCAmelCase ( *, a_ = None , a_ = None , a_ = dataclasses.MISSING , a_ = dataclasses.MISSING , a_ = None , **a_ , ) -> dataclasses.Field: """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __A = {} if aliases is not None: __A = aliases if help is not None: __A = help return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_ ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 def __init__( self : Union[str, Any] ,A : Union[DataClassType, Iterable[DataClassType]] ,**A : List[Any] ): # To make the default appear when using --help if "formatter_class" not in kwargs: __A = ArgumentDefaultsHelpFormatter super().__init__(**A ) if dataclasses.is_dataclass(A ): __A = [dataclass_types] __A = list(A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A ) @staticmethod def UpperCamelCase_ ( A : ArgumentParser ,A : dataclasses.Field ): __A = f'''--{field.name}''' __A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) __A = kwargs.pop("aliases" ,[] ) if isinstance(A ,A ): __A = [aliases] __A = getattr(field.type ,"__origin__" ,field.type ) if origin_type is Union or (hasattr(A ,"UnionType" ) and isinstance(A ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
f''' Problem encountered in field \'{field.name}\'.''' ) if type(A ) not in field.type.__args__: # filter `str` in Union __A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __A = getattr(field.type ,"__origin__" ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __A = ( field.type.__args__[0] if isinstance(A ,field.type.__args__[1] ) else field.type.__args__[1] ) __A = getattr(field.type ,"__origin__" ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __A = {} if origin_type is Literal or (isinstance(field.type ,A ) and issubclass(field.type ,A )): if origin_type is Literal: __A = field.type.__args__ else: __A = [x.value for x in field.type] __A = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: __A = field.default else: __A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __A = copy(A ) # Hack because type=bool in argparse does not behave as we want. __A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. __A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __A = default # This tells argparse we accept 0 or 1 value after --field_name __A = "?" # This is the value that will get picked if we do --field_name (without value) __A = True elif isclass(A ) and issubclass(A ,A ): __A = field.type.__args__[0] __A = "+" if field.default_factory is not dataclasses.MISSING: __A = field.default_factory() elif field.default is dataclasses.MISSING: __A = True else: __A = field.type if field.default is not dataclasses.MISSING: __A = field.default elif field.default_factory is not dataclasses.MISSING: __A = field.default_factory() else: __A = True parser.add_argument(A ,*A ,**A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __A = False parser.add_argument(f'''--no_{field.name}''' ,action="store_false" ,dest=field.name ,**A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : DataClassType ): if hasattr(A ,"_argument_group_name" ): __A = self.add_argument_group(dtype._argument_group_name ) else: __A = self try: __A = get_type_hints(A ) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A ): __A = ".".join(map(A ,sys.version_info[:3] ) ) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. 
Try removing ''' "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(A ): if not field.init: continue __A = type_hints[field.name] self._parse_dataclass_field(A ,A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : List[Any]=None ,A : List[Any]=False ,A : Optional[Any]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __A = [] if args_filename: args_files.append(Path(A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __A = ArgumentParser() args_file_parser.add_argument(A ,type=A ,action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) __A , __A = args_file_parser.parse_known_args(args=A ) __A = vars(A ).get(args_file_flag.lstrip("-" ) ,A ) if cmd_args_file_paths: args_files.extend([Path(A ) for p in cmd_args_file_paths] ) __A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __A = file_args + args if args is not None else file_args + sys.argv[1:] __A , __A = self.parse_known_args(args=A ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in vars(A ).items() if k in keys} for k in keys: delattr(A ,A ) __A = dtype(**A ) outputs.append(A ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def UpperCamelCase_ ( self : Dict ,A : Dict[str, Any] ,A : bool = False ): __A = set(args.keys() ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __A = dtype(**A ) outputs.append(A ) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(A )}''' ) return tuple(A ) def UpperCamelCase_ ( self : List[str] ,A : str ,A : bool = False ): with open(Path(A ) ,encoding="utf-8" ) as open_json_file: __A = json.loads(open_json_file.read() ) __A = self.parse_dict(A ,allow_extra_keys=A ) return tuple(A ) def UpperCamelCase_ ( self : int ,A : str ,A : bool = False ): __A = self.parse_dict(yaml.safe_load(Path(A ).read_text() ) ,allow_extra_keys=A ) return tuple(A )
15
1
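The argument-parser sample maps dataclass fields onto argparse flags, including the no_* complement it builds for boolean fields that default to True. A minimal usage sketch, assuming the public HfArgumentParser API; the dataclass itself is invented for illustration.

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class TrainingConfig:
    learning_rate: float = field(default=2e-5, metadata={"help": "Peak learning rate."})
    push_to_hub: bool = field(default=True, metadata={"help": "Upload when done."})
    run_name: Optional[str] = field(default=None)


parser = HfArgumentParser(TrainingConfig)
# A bool field defaulting to True also receives a --no_push_to_hub complement.
(config,) = parser.parse_args_into_dataclasses(args=["--no_push_to_hub"])
print(config.learning_rate, config.push_to_hub)  # 2e-05 False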
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Register the lazy module under this package's name so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003


def rabin_karp(pattern, text) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
15
1
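The heart of the Rabin-Karp sample is the O(1) rolling-hash update when the window slides one character. Shown in isolation below; the base and modulus mirror the sample's constants, and the function names are illustrative.

ALPHABET_SIZE = 256
MODULUS = 100_0003


def window_hash(s: str) -> int:
    h = 0
    for c in s:
        h = (h * ALPHABET_SIZE + ord(c)) % MODULUS
    return h


def roll(h: int, old_char: str, new_char: str, top_power: int) -> int:
    # Remove the outgoing character (weighted by base**(window_len - 1)),
    # shift the remainder up by one base, then append the incoming character.
    return ((h - ord(old_char) * top_power) * ALPHABET_SIZE + ord(new_char)) % MODULUS


top = pow(ALPHABET_SIZE, len("abc") - 1, MODULUS)
assert roll(window_hash("abc"), "a", "d", top) == window_hash("bcd")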
def UpperCAmelCase(number) -> bool:
    """simple docstring"""
    if not isinstance(number, int):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square, one at a time.
    while number > 0:
        if number % 1_0 != number_square % 1_0:
            return False
        number //= 1_0
        number_square //= 1_0
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel SCREAMING_SNAKE_CASE :Union[str, Any] = False SCREAMING_SNAKE_CASE :Any = True SCREAMING_SNAKE_CASE :Tuple = False if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE :Dict = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } SCREAMING_SNAKE_CASE :Optional[int] = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: SCREAMING_SNAKE_CASE :Dict = reader.read() SCREAMING_SNAKE_CASE :List[str] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel(**config) else: SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel SCREAMING_SNAKE_CASE :List[str] = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) SCREAMING_SNAKE_CASE :List[str] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: SCREAMING_SNAKE_CASE :Optional[Any] = config[key] del config[key] SCREAMING_SNAKE_CASE :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']] SCREAMING_SNAKE_CASE :List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: SCREAMING_SNAKE_CASE :Tuple = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) SCREAMING_SNAKE_CASE :Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue SCREAMING_SNAKE_CASE :List[str] = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: SCREAMING_SNAKE_CASE :List[Any] = param_value SCREAMING_SNAKE_CASE :str = True if not has_changed: SCREAMING_SNAKE_CASE :List[str] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
15
1
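The digit-comparison loop in the first sample of this row is the classic automorphic-number test: n passes when n squared ends in the digits of n. A compact restatement using string suffixes, easier to verify at a glance:

def is_automorphic(n: int) -> bool:
    # 5**2 = 25 ends in 5; 76**2 = 5776 ends in 76; 7**2 = 49 does not end in 7.
    return n >= 0 and str(n * n).endswith(str(n))


assert is_automorphic(5) and is_automorphic(76) and not is_automorphic(7)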
import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[Any] ,*A : List[Any] ,**A : int ): warnings.warn( "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use DeformableDetrImageProcessor instead." ,A ,) super().__init__(*A ,**A )
15
import argparse import math import traceback import dateutil.parser as date_parser import requests def UpperCAmelCase ( a_ ) -> str: """simple docstring""" __A = {} __A = job["started_at"] __A = job["completed_at"] __A = date_parser.parse(a_ ) __A = date_parser.parse(a_ ) __A = round((end_datetime - start_datetime).total_seconds() / 60.0 ) __A = start __A = end __A = duration_in_min return job_info def UpperCAmelCase ( a_ , a_=None ) -> str: """simple docstring""" __A = None if token is not None: __A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} __A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __A = requests.get(a_ , headers=a_ ).json() __A = {} try: job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} ) __A = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 ) for i in range(a_ ): __A = requests.get(url + F'''&page={i + 2}''' , headers=a_ ).json() job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() SCREAMING_SNAKE_CASE :Union[str, Any] = get_job_time(args.workflow_run_id) SCREAMING_SNAKE_CASE :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f'''{k}: {v["duration"]}''')
15
1
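The workflow-timing script reduces, per job, to parsing two ISO-8601 timestamps and rounding the difference to minutes; shown in isolation below, with the requests/pagination plumbing elided and a made-up payload.

import dateutil.parser as date_parser


def job_duration_min(job: dict) -> int:
    # GitHub's API reports started_at/completed_at as ISO-8601 strings.
    start = date_parser.parse(job["started_at"])
    end = date_parser.parse(job["completed_at"])
    return round((end - start).total_seconds() / 60.0)


job = {"started_at": "2023-05-01T12:00:00Z", "completed_at": "2023-05-01T12:42:30Z"}
print(job_duration_min(job))  # 42 (42.5 rounds down under banker's rounding)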
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline SCREAMING_SNAKE_CASE :Tuple = { 'n_samples': 64, 'horizon': 32, 'num_inference_steps': 20, 'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network 'scale_grad_by_std': True, 'scale': 0.1, 'eta': 0.0, 't_grad_cutoff': 2, 'device': 'cpu', } if __name__ == "__main__": SCREAMING_SNAKE_CASE :List[str] = 'hopper-medium-v2' SCREAMING_SNAKE_CASE :Union[str, Any] = gym.make(env_name) SCREAMING_SNAKE_CASE :List[Any] = ValueGuidedRLPipeline.from_pretrained( 'bglick13/hopper-medium-v2-value-function-hor32', env=env, ) env.seed(0) SCREAMING_SNAKE_CASE :Optional[int] = env.reset() SCREAMING_SNAKE_CASE :Dict = 0 SCREAMING_SNAKE_CASE :Union[str, Any] = 0 SCREAMING_SNAKE_CASE :Any = 1000 SCREAMING_SNAKE_CASE :str = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy SCREAMING_SNAKE_CASE :Any = pipeline(obs, planning_horizon=32) # execute action in environment SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Tuple = env.step(denorm_actions) SCREAMING_SNAKE_CASE :Optional[Any] = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' f''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) SCREAMING_SNAKE_CASE :List[str] = next_observation except KeyboardInterrupt: pass print(f'''Total reward: {total_reward}''')
15
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" __A = args.pruning_method __A = args.threshold __A = args.model_name_or_path.rstrip("/" ) __A = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) __A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) ) __A = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: __A = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": __A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = TopKBinarizer.apply(a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = ThresholdBinarizer.apply(a_ , a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A , __A = -0.1, 1.1 __A = torch.sigmoid(a_ ) __A = s * (r - l) + l __A = s_bar.clamp(min=0.0 , max=1.0 ) __A = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: __A = os.path.join( os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' ) if not os.path.isdir(a_ ): shutil.copytree(a_ , a_ ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) SCREAMING_SNAKE_CASE :str = parser.parse_args() main(args)
15
1
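The topK branch of the pruning converter keeps the fraction of weights whose importance scores rank highest in each matrix. Below is a plain-torch sketch of that masking; the real TopKBinarizer's internals are assumed, not reproduced.

import torch


def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Keep the top keep_ratio fraction of scores and zero out the rest.
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= threshold).to(scores.dtype)


scores = torch.tensor([[0.9, 0.1], [0.4, 0.7]])
weights = torch.ones(2, 2)
print(weights * topk_mask(scores, keep_ratio=0.5))  # keeps the 0.9 and 0.7 positions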
def UpperCAmelCase(matrix) -> int:
    """simple docstring"""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Union[str, Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } SCREAMING_SNAKE_CASE :int = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = [] def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,): __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def UpperCamelCase_ ( self : List[str] ): return self.sp_model.get_piece_size() def UpperCamelCase_ ( self : Optional[Any] ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : str ,A : Optional[Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Tuple ): return self.sp_model.piece_to_id(A ) def UpperCamelCase_ ( self : List[Any] ,A : Tuple ): __A = self.sp_model.IdToPiece(A ) return token def UpperCamelCase_ ( self : List[Any] ,A : int ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,): __A = kwargs.pop("use_source_tokenizer" ,A ) __A = self.convert_ids_to_tokens(A ,skip_special_tokens=A ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __A = [] __A = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) __A = [] sub_texts.append(A ) else: current_sub_text.append(A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) ) else: __A = "".join(A ) __A = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __A = self.clean_up_tokenization(A ) return clean_text else: return text def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
15
1
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def UpperCAmelCase ( ) -> int: """simple docstring""" __A = argparse.ArgumentParser() parser.add_argument( "-m" , "--pretrained_model_name_or_path" , type=a_ , default=a_ , required=a_ , help="Path to pretrained model or model identifier from huggingface.co/models." , ) parser.add_argument( "-c" , "--caption" , type=a_ , default="robotic cat with wings" , help="Text used to generate images." , ) parser.add_argument( "-n" , "--images_num" , type=a_ , default=4 , help="How much images to generate." , ) parser.add_argument( "-s" , "--seed" , type=a_ , default=4_2 , help="Seed for random process." , ) parser.add_argument( "-ci" , "--cuda_id" , type=a_ , default=0 , help="cuda_id." , ) __A = parser.parse_args() return args def UpperCAmelCase ( a_ , a_ , a_ ) -> Dict: """simple docstring""" if not len(a_ ) == rows * cols: raise ValueError("The specified number of rows and columns are not correct." ) __A , __A = imgs[0].size __A = Image.new("RGB" , size=(cols * w, rows * h) ) __A , __A = grid.size for i, img in enumerate(a_ ): grid.paste(a_ , box=(i % cols * w, i // cols * h) ) return grid def UpperCAmelCase ( a_ , a_="robotic cat with wings" , a_=7.5 , a_=5_0 , a_=1 , a_=4_2 , ) -> Optional[Any]: """simple docstring""" __A = torch.Generator(pipeline.device ).manual_seed(a_ ) __A = pipeline( a_ , guidance_scale=a_ , num_inference_steps=a_ , generator=a_ , num_images_per_prompt=a_ , ).images __A = int(math.sqrt(a_ ) ) __A = image_grid(a_ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images SCREAMING_SNAKE_CASE :str = parse_args() # Load models and create wrapper for stable diffusion SCREAMING_SNAKE_CASE :Any = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') SCREAMING_SNAKE_CASE :Union[str, Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') SCREAMING_SNAKE_CASE :str = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') SCREAMING_SNAKE_CASE :List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') SCREAMING_SNAKE_CASE :str = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) SCREAMING_SNAKE_CASE :Union[str, Any] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): SCREAMING_SNAKE_CASE :Optional[Any] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: SCREAMING_SNAKE_CASE :Union[str, Any] = unet.to(torch.device('cuda', args.cuda_id)) SCREAMING_SNAKE_CASE :Union[str, Any] = pipeline.to(unet.device) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :str = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) SCREAMING_SNAKE_CASE :Dict = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
15
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
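A small, self-contained check of numpy_to_pil (my own example): a random float batch in [0, 1] should come back as a list of RGB PIL images.

import numpy as np

batch = np.random.rand(2, 64, 64, 3)  # two 64x64 RGB images in [0, 1]
pils = numpy_to_pil(batch)
assert len(pils) == 2 and pils[0].size == (64, 64) and pils[0].mode == "RGB"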
15
1
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string along a zigzag (rail fence) grid."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates the template grid, fills it, then reads it back in a zigzag."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypts with every possible key height."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
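A round-trip sanity check for the rail-fence functions above (my own example):

message = "defend the east wall"
assert decrypt(encrypt(message, 4), 4) == message
assert bruteforce(encrypt(message, 4))[4] == message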
15
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[Any] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "yolos" def __init__( self : Any ,A : Optional[Any]=7_68 ,A : Dict=12 ,A : Any=12 ,A : str=30_72 ,A : Any="gelu" ,A : str=0.0 ,A : List[str]=0.0 ,A : Dict=0.02 ,A : int=1E-12 ,A : Tuple=[5_12, 8_64] ,A : List[Any]=16 ,A : str=3 ,A : str=True ,A : Any=1_00 ,A : Dict=True ,A : Dict=False ,A : Tuple=1 ,A : Union[str, Any]=5 ,A : Optional[Any]=2 ,A : Union[str, Any]=5 ,A : int=2 ,A : int=0.1 ,**A : List[str] ,): super().__init__(**A ) __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = initializer_range __A = layer_norm_eps __A = image_size __A = patch_size __A = num_channels __A = qkv_bias __A = num_detection_tokens __A = use_mid_position_embeddings __A = auxiliary_loss # Hungarian matcher __A = class_cost __A = bbox_cost __A = giou_cost # Loss coefficients __A = bbox_loss_coefficient __A = giou_loss_coefficient __A = eos_coefficient class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = version.parse("1.11" ) @property def UpperCamelCase_ ( self : str ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase_ ( self : List[Any] ): return 1E-4 @property def UpperCamelCase_ ( self : Optional[Any] ): return 12
15
1
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) def UpperCAmelCase ( a_ , a_ , a_ , a_=None , a_=None ) -> str: """simple docstring""" if "." in tensor_name: __A = tensor_name.split("." ) for split in splits[:-1]: __A = getattr(a_ , a_ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) __A = new_module __A = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) __A = tensor_name in module._buffers __A = getattr(a_ , a_ ) if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) __A = False __A = False if is_buffer or not is_bitsandbytes_available(): __A = False __A = False else: __A = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __A = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __A = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __A = old_value.to(a_ ) elif isinstance(a_ , torch.Tensor ): __A = value.to("cpu" ) if value.dtype == torch.inta: __A = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse( "0.37.2" ) if not is_abit_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) else: __A = torch.tensor(a_ , device="cpu" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , a_ ) and fpaa_statistics is None: __A = new_value.T __A = old_value.__dict__ if is_abit: __A = bnb.nn.IntaParams(a_ , requires_grad=a_ , **a_ ).to(a_ ) elif is_abit: __A = bnb.nn.Paramsabit(a_ , requires_grad=a_ , **a_ ).to(a_ ) __A = new_value if fpaa_statistics is not None: setattr(module.weight , "SCB" , fpaa_statistics.to(a_ ) ) else: if value is None: __A = old_value.to(a_ ) elif isinstance(a_ , torch.Tensor ): __A = value.to(a_ ) else: __A = torch.tensor(a_ , device=a_ ) if is_buffer: __A = new_value else: __A = nn.Parameter(a_ , requires_grad=old_value.requires_grad ) __A = new_value def UpperCAmelCase ( a_ , a_=None , a_=None , a_=None , a_=False ) -> Optional[int]: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: __A = [] current_key_name.append(a_ ) if (isinstance(a_ , nn.Linear ) or isinstance(a_ , a_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in ".".join(a_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(a_ , a_ ): __A , __A = module.weight.shape else: __A = module.in_features __A = module.out_features if quantization_config.quantization_method() == "llm_int8": __A = bnb.nn.LinearabitLt( a_ , a_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __A = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __A = bnb.nn.Linearabit( a_ , a_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __A = True # Store the module class in case we need to transpose the weight later __A = type(a_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(a_ ) if len(list(module.children() ) ) > 0: __A , __A = _replace_with_bnb_linear( a_ , a_ , a_ , a_ , has_been_replaced=a_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCAmelCase ( a_ , a_=None , a_=None , a_=None ) -> int: """simple docstring""" __A = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert __A , __A = _replace_with_bnb_linear( a_ , a_ , a_ , a_ ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." 
) return model def UpperCAmelCase ( *a_ , **a_ ) -> List[str]: """simple docstring""" warnings.warn( "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , a_ , ) return replace_with_bnb_linear(*a_ , **a_ ) def UpperCAmelCase ( *a_ , **a_ ) -> int: """simple docstring""" warnings.warn( "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , a_ , ) return set_module_quantized_tensor_to_device(*a_ , **a_ ) def UpperCAmelCase ( a_ ) -> List[Any]: """simple docstring""" __A = deepcopy(a_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __A = find_tied_parameters(a_ ) # For compatibility with Accelerate < 0.18 if isinstance(a_ , a_ ): __A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __A = sum(a_ , [] ) __A = len(a_ ) > 0 # Check if it is a base model __A = not hasattr(a_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __A = list(model.named_children() ) __A = [list_modules[-1][0]] # add last module together with tied weights __A = set(a_ ) - set(a_ ) __A = list(set(a_ ) ) + list(a_ ) # remove ".weight" from the keys __A = [".weight", ".bias"] __A = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __A = name.replace(a_ , "" ) filtered_module_names.append(a_ ) return filtered_module_names
15
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin' SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json' SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin' SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors' SCREAMING_SNAKE_CASE :str = 'tf_model.h5' SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json' SCREAMING_SNAKE_CASE :str = 'model.ckpt' SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack' SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json' SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors' SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json' SCREAMING_SNAKE_CASE :str = 'config.json' SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json' SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json' SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json' SCREAMING_SNAKE_CASE :Optional[int] = '▁' SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility SCREAMING_SNAKE_CASE :str = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" if version.parse(a_ ) < version.parse(a_ ): if "dev" in min_version: __A = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: __A = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
15
1
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Tuple = { 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "transfo-xl" snake_case_ = ["mems"] snake_case_ = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int ,A : List[str]=26_77_35 ,A : Tuple=[2_00_00, 4_00_00, 20_00_00] ,A : int=10_24 ,A : Any=10_24 ,A : Dict=16 ,A : Union[str, Any]=64 ,A : Union[str, Any]=40_96 ,A : Union[str, Any]=4 ,A : Optional[Any]=False ,A : Any=18 ,A : Optional[int]=16_00 ,A : str=10_00 ,A : Optional[int]=True ,A : Dict=True ,A : Any=0 ,A : int=-1 ,A : str=True ,A : str=0.1 ,A : Dict=0.0 ,A : str=True ,A : Any="normal" ,A : List[str]=0.01 ,A : int=0.01 ,A : Optional[int]=0.02 ,A : int=1E-5 ,A : Optional[Any]=0 ,**A : Optional[int] ,): __A = vocab_size __A = [] self.cutoffs.extend(A ) if proj_share_all_but_first: __A = [False] + [True] * len(self.cutoffs ) else: __A = [False] + [False] * len(self.cutoffs ) __A = d_model __A = d_embed __A = d_head __A = d_inner __A = div_val __A = pre_lnorm __A = n_layer __A = n_head __A = mem_len __A = same_length __A = attn_type __A = clamp_len __A = sample_softmax __A = adaptive __A = dropout __A = dropatt __A = untie_r __A = init __A = init_range __A = proj_init_std __A = init_std __A = layer_norm_epsilon super().__init__(eos_token_id=A ,**A ) @property def UpperCamelCase_ ( self : List[str] ): # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def UpperCamelCase_ ( self : Any ,A : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
15
def longest_distance(graph):
    """Longest chain of vertices in a DAG, via Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
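For reference (my own worked example): on the graph above the longest chain is 0 -> 2 -> 5 -> 6 -> 7, so the call prints 5, counting vertices rather than edges. A minimal chain makes the convention obvious:

longest_distance({0: [1], 1: [2], 2: []})  # prints 3: the chain 0 -> 1 -> 2 has three vertices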
15
1
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Reads a pyspark DataFrame into a Dataset."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
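A hypothetical usage sketch (the session setup and cache path are my assumptions, not from the sample, and it presumes the reader class above is importable): read a small in-memory Spark DataFrame into a materialized Dataset.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], schema="text string")
ds = SparkDatasetReader(df, cache_dir="/tmp/spark_ds_cache", streaming=False).read()
print(ds)  # a Dataset with a single `text` column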
15
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" __A = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) __A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) __A = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) __A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) __A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) __A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) __A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) __A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) __A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) __A = key.replace("image_encoder.module" , "flava.image_model" ) __A = key.replace("text_encoder.module" , "flava.text_model" ) __A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) __A = key.replace("mm_encoder.module" , "flava.multimodal_model" ) __A = key.replace("text_projection" , "flava.text_projection" ) __A = key.replace("image_projection" , "flava.image_projection" ) __A = value.float() for key, value in codebook_state_dict.items(): __A = value return upgrade @torch.no_grad() def UpperCAmelCase ( a_ , a_ , a_ , a_=None ) -> Tuple: """simple docstring""" if config_path is not None: __A = FlavaConfig.from_pretrained(a_ ) else: __A = FlavaConfig() __A = FlavaForPreTraining(a_ ).eval() __A = convert_dalle_checkpoint(a_ , a_ , save_checkpoint=a_ ) if os.path.exists(a_ ): __A = torch.load(a_ , map_location="cpu" ) else: __A = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" ) __A = upgrade_state_dict(a_ , a_ ) hf_model.load_state_dict(a_ ) __A = hf_model.state_dict() __A = count_parameters(a_ ) __A = count_parameters(a_ ) + count_parameters(a_ ) assert torch.allclose(a_ , a_ , atol=1E-3 ) hf_model.save_pretrained(a_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
15
1
def fibonacci(n: int) -> int:
    """Computes the Fibonacci number at index n iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Returns the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
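A quick sanity check (my own example; 4782 is the well-known Project Euler 25 answer, left as a comment because recomputing it with this implementation is slow):

assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12  # 144 is the first 3-digit Fibonacci number
# solution() == 4782 for the default n=1000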
15
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} SCREAMING_SNAKE_CASE :Tuple = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } SCREAMING_SNAKE_CASE :List[Any] = { 'camembert-base': 512, } SCREAMING_SNAKE_CASE :List[str] = '▁' class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,): # Mask token behave like a normal word, i.e. include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) __A = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __A = len(self.fairseq_tokens_to_ids ) __A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self : Dict ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self : int ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A ) == 0: # Convert sentence piece unk token to fairseq unk token index 
return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : Tuple ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def __getstate__( self : Dict ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Union[str, Any] ,A : Any ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
15
1
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler SCREAMING_SNAKE_CASE :int = 16 SCREAMING_SNAKE_CASE :Any = 32 def UpperCAmelCase ( a_ , a_ = 1_6 , a_ = "bert-base-cased" ) -> Union[str, Any]: """simple docstring""" __A = AutoTokenizer.from_pretrained(a_ ) __A = load_dataset("glue" , "mrpc" ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=a_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __A = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(a_ , padding="max_length" , max_length=1_2_8 , return_tensors="pt" ) return tokenizer.pad(a_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. __A = DataLoader( tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) __A = DataLoader( tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader def UpperCAmelCase ( a_ , a_ ) -> List[str]: """simple docstring""" __A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __A = config["lr"] __A = int(config["num_epochs"] ) __A = int(config["seed"] ) __A = int(config["batch_size"] ) __A = args.model_name_or_path set_seed(a_ ) __A , __A = get_dataloaders(a_ , a_ , a_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __A = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ ) # Instantiate optimizer __A = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __A = optimizer_cls(params=model.parameters() , lr=a_ ) if accelerator.state.deepspeed_plugin is not None: __A = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: __A = 1 __A = (len(a_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __A = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , ) else: __A = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__A , __A , __A , __A , __A = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # We need to keep track of how many total steps we have iterated over __A = 0 # We also need to keep track of the stating epoch so files are named properly __A = 0 # Now we train the model __A = evaluate.load("glue" , "mrpc" ) __A = 0 __A = {} for epoch in range(a_ , a_ ): model.train() for step, batch in enumerate(a_ ): __A = model(**a_ ) __A = outputs.loss __A = loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __A = 0 for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __A = model(**a_ ) __A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __A , __A = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(a_ ) - 1: __A = predictions[: len(eval_dataloader.dataset ) - samples_seen] __A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=a_ , references=a_ , ) __A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , a_ ) __A = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: __A = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(a_ , a_ ) def UpperCAmelCase ( ) -> List[str]: """simple docstring""" __A = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=a_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=a_ , ) parser.add_argument( "--output_dir" , type=a_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=a_ , default=a_ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=a_ , default=3 , help="Number of train epochs." , ) __A = parser.parse_args() __A = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6} training_function(a_ , a_ ) if __name__ == "__main__": main()
15
def heaps(arr: list) -> list:
    """Returns all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
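A correctness check against the standard library (my own example):

from itertools import permutations

out = heaps([1, 2, 3])
assert len(out) == 6
assert sorted(out) == sorted(permutations([1, 2, 3]))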
15
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
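The `_LazyModule` pattern above defers heavy imports until an attribute is first accessed. A standalone sketch of the same idea using module-level `__getattr__` (PEP 562); this illustrates the mechanism only and is not the `_LazyModule` implementation:

# lazy_pkg/__init__.py (hypothetical package)
import importlib

_SUBMODULES = {"MraModel": "modeling_mra", "MraConfig": "configuration_mra"}


def __getattr__(name):
    # import the submodule only when one of its names is actually requested
    if name in _SUBMODULES:
        module = importlib.import_module(f".{_SUBMODULES[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")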
15
def gnome_sort(lst: list) -> list:
    """Sorts `lst` in place with gnome sort and returns it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
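A quick check (my own example); note the sort works in place, so the input list is mutated:

data = [5, 3, 1, 4, 2]
assert gnome_sort(data) == [1, 2, 3, 4, 5]
assert data == [1, 2, 3, 4, 5]  # the original list was reordered in place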
15
1
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): SCREAMING_SNAKE_CASE :Optional[Any] = yaml.safe_load( '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n' ) SCREAMING_SNAKE_CASE :Optional[int] = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } SCREAMING_SNAKE_CASE :List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :List[str] = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Extra Ignored Subsection', 'text': '', 'is_empty_text': True, 'subsections': [], } ], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } SCREAMING_SNAKE_CASE :Optional[int] = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :int = ( 'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.' 
) SCREAMING_SNAKE_CASE :Dict = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :List[str] = ( 'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.' ) SCREAMING_SNAKE_CASE :str = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :int = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.' SCREAMING_SNAKE_CASE :Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :List[str] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).' SCREAMING_SNAKE_CASE :Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n' SCREAMING_SNAKE_CASE :Dict = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.' SCREAMING_SNAKE_CASE :Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :str = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.' SCREAMING_SNAKE_CASE :Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n' SCREAMING_SNAKE_CASE :int = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.' SCREAMING_SNAKE_CASE :str = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :int = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.' 
SCREAMING_SNAKE_CASE :List[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n' SCREAMING_SNAKE_CASE :int = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.' SCREAMING_SNAKE_CASE :Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :str = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.' SCREAMING_SNAKE_CASE :Tuple = '' SCREAMING_SNAKE_CASE :Tuple = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.' SCREAMING_SNAKE_CASE :str = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' SCREAMING_SNAKE_CASE :Optional[Any] = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.' 
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    # `example_yaml_structure` is the validation structure defined earlier in this test module
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = None class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 2 @register_to_config def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,): # standard deviation of the initial noise distribution __A = sigma_max # setable values __A = None __A = None __A = None # sigma(t_i) def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ): return sample def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ): __A = num_inference_steps __A = np.arange(0 ,self.num_inference_steps )[::-1].copy() __A = torch.from_numpy(A ).to(A ) __A = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __A = torch.tensor(A ,dtype=torch.floataa ,device=A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ): if self.config.s_min <= sigma <= self.config.s_max: __A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 ) else: __A = 0 # sample eps ~ N(0, S_noise^2 * I) __A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device ) __A = sigma + gamma * sigma __A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_hat + sigma_hat * model_output __A = (sample_hat - pred_original_sample) / sigma_hat __A = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,): __A = sample_prev + sigma_prev * model_output __A = (sample_prev - pred_original_sample) / sigma_prev __A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A ,derivative=A ,pred_original_sample=A ) def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ): raise NotImplementedError()
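# --- Usage sketch (not part of the scheduler file above) ---
# A minimal sampling loop for the Karras et al. (2022) stochastic sampler,
# assuming the class above corresponds to diffusers' `KarrasVeScheduler`
# (the class name is mangled above) and that `unet` is a hypothetical
# denoising model whose call returns an object with a `.sample` tensor.
import torch

scheduler = KarrasVeScheduler()  # assumed class name
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 64, 64) * scheduler.config.sigma_max
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # 1. stochastic churn: add fresh noise, moving the sample to noise level sigma_hat >= sigma
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    # 2. Euler step from sigma_hat down to sigma_prev
    model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
    step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
    # 3. second-order correction, skipped at the final step (sigma_prev == 0)
    if sigma_prev != 0:
        model_output = (sigma_prev / 2) * unet((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
        step_output = scheduler.step_correct(
            model_output,
            sigma_hat,
            sigma_prev,
            sample_hat,
            step_output.prev_sample,
            step_output.derivative,
        )
    sample = step_output.prev_sample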
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the outline of a triangle, then recurse on its three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__) class UpperCAmelCase : '''simple docstring''' snake_case_ = "dummy_data" snake_case_ = "datasets" snake_case_ = False def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,): __A = 0 __A = dataset_name __A = cache_dir __A = use_local_dummy_data __A = config # download_callbacks take a single url as input __A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __A = str(A ) # to be downloaded __A = None __A = None @property def UpperCamelCase_ ( self : Union[str, Any] ): if self._dummy_file is None: __A = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase_ ( self : Optional[Any] ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def UpperCamelCase_ ( self : List[Any] ): return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def UpperCamelCase_ ( self : Tuple ): __A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __A = cached_path( A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A ) return os.path.join(A ,self.dummy_file_name ) @property def UpperCamelCase_ ( self : str ): return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCamelCase_ ( self : Any ): if self._bucket_url is None: __A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def UpperCamelCase_ ( self : Tuple ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ): if self.load_existing_dummy_data: # dummy data is downloaded and tested __A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __A = self.dummy_file_name # special case when data_url is a dict if isinstance(A ,A ): return self.create_dummy_data_dict(A ,A ) elif isinstance(A ,(list, tuple) ): return self.create_dummy_data_list(A ,A ) else: return self.create_dummy_data_single(A ,A ) def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ): return self.download_and_extract(A ) def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ): return path def UpperCamelCase_ ( self : str ): return {} def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ): __A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(A ,A ): for single_url in single_urls: download_callback(A ) else: __A = single_urls download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(A ,A ): __A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls] else: __A = single_urls __A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) __A = value # make sure that values are unique if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ): __A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url ) __A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __A = [data_url[0]] * len(A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(A ) return dummy_data_list def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ): for download_callback in self.download_callbacks: download_callback(A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def UpperCamelCase_ ( self : int ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ): def _iter_archive_members(A : Optional[Any] ): # this preserves the order of the members inside the ZIP archive __A = Path(self.dummy_file ).parent __A = path.relative_to(A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(A ) __A = Path(A ) __A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(A ).as_posix(), file_path.open("rb" ) def UpperCamelCase_ ( self : List[Any] ,A : Any ): if not isinstance(A ,A ): __A = [paths] for path in paths: if os.path.isfile(A ): if os.path.basename(A ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(A ): if os.path.basename(A ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(A ): if filename.startswith((".", "__") ): continue yield os.path.join(A ,A )
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = ["vqvae"] def __init__( self : List[str] ,A : AutoencoderKL ,A : UNetaDConditionModel ,A : Mel ,A : Union[DDIMScheduler, DDPMScheduler] ,): super().__init__() self.register_modules(unet=A ,scheduler=A ,mel=A ,vqvae=A ) def UpperCamelCase_ ( self : Tuple ): return 50 if isinstance(self.scheduler ,A ) else 10_00 @torch.no_grad() def __call__( self : Tuple ,A : int = 1 ,A : str = None ,A : np.ndarray = None ,A : int = 0 ,A : int = 0 ,A : int = None ,A : torch.Generator = None ,A : float = 0 ,A : float = 0 ,A : torch.Generator = None ,A : float = 0 ,A : torch.Tensor = None ,A : torch.Tensor = None ,A : int=True ,): __A = steps or self.get_default_steps() self.scheduler.set_timesteps(A ) __A = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __A = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __A = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=A ,device=self.device ,) __A = noise __A = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(A ,A ) __A = self.mel.audio_slice_to_image(A ) __A = np.frombuffer(input_image.tobytes() ,dtype="uint8" ).reshape( (input_image.height, input_image.width) ) __A = (input_image / 2_55) * 2 - 1 __A = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: __A = self.vqvae.encode(torch.unsqueeze(A ,0 ) ).latent_dist.sample( generator=A )[0] __A = self.vqvae.config.scaling_factor * input_images if start_step > 0: __A = self.scheduler.add_noise(A ,A ,self.scheduler.timesteps[start_step - 1] ) __A = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __A = int(mask_start_secs * pixels_per_second ) __A = int(mask_end_secs * pixels_per_second ) __A = self.scheduler.add_noise(A ,A ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,A ): __A = self.unet(A ,A ,A )["sample"] else: __A = self.unet(A ,A )["sample"] if isinstance(self.scheduler ,A ): __A = self.scheduler.step( model_output=A ,timestep=A ,sample=A ,eta=A ,generator=A ,)["prev_sample"] else: __A = self.scheduler.step( model_output=A ,timestep=A ,sample=A ,generator=A ,)["prev_sample"] if mask is not None: if mask_start > 0: __A = mask[:, step, :, :mask_start] if mask_end > 0: __A = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __A = 1 / self.vqvae.config.scaling_factor * images __A = self.vqvae.decode(A )["sample"] __A = (images / 2 + 0.5).clamp(0 ,1 ) __A = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() __A = (images * 2_55).round().astype("uint8" ) __A = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(A ,mode="RGB" ).convert("L" ) for _ in images) ) __A = [self.mel.image_to_audio(A ) for _ in images] if 
not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(A ) ) @torch.no_grad() def UpperCamelCase_ ( self : List[Any] ,A : List[Image.Image] ,A : int = 50 ): assert isinstance(self.scheduler ,A ) self.scheduler.set_timesteps(A ) __A = np.array( [np.frombuffer(image.tobytes() ,dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) __A = (sample / 2_55) * 2 - 1 __A = torch.Tensor(A ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): __A = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __A = self.scheduler.alphas_cumprod[t] __A = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __A = 1 - alpha_prod_t __A = self.unet(A ,A )["sample"] __A = (1 - alpha_prod_t_prev) ** 0.5 * model_output __A = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __A = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCamelCase_ ( A : torch.Tensor ,A : torch.Tensor ,A : float ): __A = acos(torch.dot(torch.flatten(A ) ,torch.flatten(A ) ) / torch.norm(A ) / torch.norm(A ) ) return sin((1 - alpha) * theta ) * xa / sin(A ) + sin(alpha * theta ) * xa / sin(A )
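# --- Usage sketch (not part of the pipeline file above) ---
# Assuming the class above corresponds to diffusers' `AudioDiffusionPipeline`,
# a pretrained checkpoint ("teticio/audio-diffusion-256" is an assumed example
# repo) can be loaded and run like this:
import torch
from diffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
generator = torch.Generator(device="cpu").manual_seed(42)
output = pipe(generator=generator)
image = output.images[0]  # the generated mel spectrogram as a PIL image
audio = output.audios[0]  # the reconstructed waveform as a numpy array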
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, Optional import numpy as np import datasets SCREAMING_SNAKE_CASE :List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' SCREAMING_SNAKE_CASE :List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' SCREAMING_SNAKE_CASE :str = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Tuple: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): __A = new_id # turn into Numpy arrays __A = np.array(a_ ) __A = np.array(a_ ) if reduce_labels: __A = 2_5_5 __A = label - 1 __A = 2_5_5 __A = label != ignore_index __A = np.not_equal(a_ , a_ ) __A = pred_label[mask] __A = np.array(a_ )[mask] __A = pred_label[pred_label == label] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Union[str, Any]: """simple docstring""" __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a_ , a_ ): __A , __A , __A , __A = intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = None , a_ = False , ) -> str: """simple docstring""" __A , __A , __A , __A = total_intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) # compute metrics __A = {} __A = total_area_intersect.sum() / total_area_label.sum() __A = total_area_intersect / total_area_union __A = total_area_intersect / total_area_label __A = np.nanmean(a_ ) __A = np.nanmean(a_ ) __A = all_acc __A = iou __A = acc if nan_to_num is not None: __A = {metric: np.nan_to_num(a_ , nan=a_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) ,reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : bool ,A : Optional[int] = None ,A : Optional[Dict[int, int]] = None ,A : bool = False ,): __A = mean_iou( results=A ,gt_seg_maps=A ,num_labels=A ,ignore_index=A ,nan_to_num=A ,label_map=A ,reduce_labels=A ,) return iou_result
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Take in 2 positive integers and return the binary representation of
    (number << shift_amount): zeros are appended on the right.

    >>> logical_left_shift(0b1101, 1)
    '0b11010'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """
    Take in 2 positive integers and return the binary representation of
    (number >>> shift_amount): bits are dropped from the right, no sign extension.

    >>> logical_right_shift(0b1101, 1)
    '0b110'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """
    Take in 2 integers and return the binary representation of
    (number >> shift_amount): the sign bit is replicated on the left,
    using a two's complement encoding for negative numbers.

    >>> arithmetic_right_shift(-8, 1)
    '0b11100'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :List[str] = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE :Optional[Any] = { 'AI-Sweden/gpt-sw3-126m': 2048, 'AI-Sweden/gpt-sw3-350m': 2048, 'AI-Sweden/gpt-sw3-1.6b': 2048, 'AI-Sweden/gpt-sw3-6.7b': 2048, 'AI-Sweden/gpt-sw3-20b': 2048, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,): __A = {} if sp_model_kwargs is None else sp_model_kwargs __A = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) __A = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __A = "<|endoftext|>" if eos_token is None else eos_token __A = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __A = unk_token if pad_token is None else pad_token __A = eos_token if bos_token is None else bos_token else: __A = "<pad>" if pad_token is None else pad_token __A = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = do_lower_case __A = remove_space __A = keep_accents __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) # Used for whitespace normalization in input texts # fmt : off __A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __A = re.compile( f'''[{''.join(map(A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]''' ) def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : Optional[Any] ,A : Union[str, Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self : List[str] ): return len(self.sp_model ) def UpperCamelCase_ ( self : int ,A : str ): __A = self.non_printing_characters_re.sub("" ,A ) # Normalize whitespaces __A = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization __A = unicodedata.normalize("NFC" ,A ) return text def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ): __A = self.preprocess_text(A ) return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self : Dict ,A : int ): return self.sp_model.IdToPiece(A ) @staticmethod def UpperCamelCase_ ( A : str ): return out_string def UpperCamelCase_ ( self : str ,A : List[str] ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string def UpperCamelCase_ ( self : str ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ): if isinstance(A ,A ): __A = self.preprocess_text(A ) __A = self.sp_model.encode(A ) else: __A = [self.preprocess_text(A ) for t in text] __A = self.sp_model.encode(A ) if return_tensors is True or return_tensors == "pt": __A = torch.tensor(A ) return token_ids def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ): return self.sp_model.decode(A ) def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ): __A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __A = ( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=A )
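# --- Usage sketch (not part of the tokenizer file above) ---
# Assuming the class above corresponds to transformers' GPTSw3Tokenizer,
# it loads and runs like any sentencepiece-based slow tokenizer. The
# checkpoint name is taken from the pretrained map above (the Hub repos
# have since moved under the `AI-Sweden-Models/` organization).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Träd är fina", return_tensors="pt").input_ids
print(tokenizer.decode(ids[0]))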
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # sort comments from newest to oldest
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def UpperCAmelCase ( a_ , a_ , a_=None , a_=None ) -> Any: """simple docstring""" if attention_mask is None: __A = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class UpperCAmelCase : '''simple docstring''' snake_case_ = OPTConfig snake_case_ = {} snake_case_ = "gelu" def __init__( self : Tuple ,A : Tuple ,A : Optional[Any]=13 ,A : Union[str, Any]=7 ,A : Tuple=True ,A : Optional[int]=False ,A : List[str]=99 ,A : str=16 ,A : Optional[Any]=2 ,A : List[str]=4 ,A : Optional[int]=4 ,A : str="gelu" ,A : Any=0.1 ,A : int=0.1 ,A : int=20 ,A : Tuple=2 ,A : Optional[int]=1 ,A : Union[str, Any]=0 ,A : str=16 ,A : Dict=16 ,): __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = eos_token_id __A = pad_token_id __A = bos_token_id __A = embed_dim __A = word_embed_proj_dim __A = False def UpperCamelCase_ ( self : List[str] ): __A = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) __A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) __A = tf.concat([input_ids, eos_tensor] ,axis=1 ) __A = self.config_cls( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=A ,**self.config_updates ,) __A = prepare_opt_inputs_dict(A ,A ) return config, inputs_dict def UpperCamelCase_ ( self : Dict ,A : int ,A : Optional[int] ): __A = TFOPTModel(config=A ) __A = inputs_dict["input_ids"] __A = input_ids[:1, :] __A = inputs_dict["attention_mask"][:1, :] __A = 1 # first forward pass __A = model(A ,attention_mask=A ,use_cache=A ) __A , __A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) __A = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and __A = tf.concat([input_ids, next_tokens] ,axis=-1 ) __A = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) __A = model(A ,attention_mask=A )[0] __A = model(A ,attention_mask=A ,past_key_values=A )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice __A = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) __A = output_from_no_past[:, -3:, random_slice_idx] __A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A ,A ,rtol=1E-3 ) @require_tf 
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () snake_case_ = (TFOPTForCausalLM,) if is_tf_available() else () snake_case_ = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = 10 def UpperCamelCase_ ( self : Union[str, Any] ): __A = TFOPTModelTester(self ) __A = ConfigTester(self ,config_class=A ) def UpperCamelCase_ ( self : Dict ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): __A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A ) def UpperCamelCase_ ( self : Any ): __A , __A = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(A : Optional[int] ,A : Optional[Any] ): if hasattr(A ,"weight" ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(A ,"weight" ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings __A = model_class(config=A ) __A = _get_word_embedding_weight(A ,model.get_input_embeddings() ) __A = _get_word_embedding_weight(A ,model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(A ) __A = _get_word_embedding_weight(A ,model.get_input_embeddings() ) __A = _get_word_embedding_weight(A ,model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
__A = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] ,A ) # check that weights remain the same after resizing __A = True for pa, pa in zip(old_input_embeddings.value() ,new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: __A = False self.assertTrue(A ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] ,A ) __A = True for pa, pa in zip(old_output_embeddings.value() ,new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: __A = False self.assertTrue(A ) def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return tf.constant(a_ , dtype=tf.intaa ) @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' snake_case_ = 99 def UpperCamelCase_ ( self : Tuple ): __A = tf.ones((4, 1) ,dtype=tf.intaa ) * 2 __A = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 ) __A = input_ids.shape[0] __A = OPTConfig( vocab_size=self.vocab_size ,hidden_size=24 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,) return config, input_ids, batch_size @require_sentencepiece @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self : List[str] ): __A = TFOPTModel.from_pretrained("facebook/opt-350m" ) __A = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) __A = tf.not_equal(A ,model.config.pad_token_id ) with tf.GradientTape(): __A = model(input_ids=A ,attention_mask=A ).last_hidden_state __A = (1, 11, 5_12) self.assertEqual(output.shape ,A ) __A = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] ,A ,atol=4E-3 ) ) __A = tf.function(A ,jit_compile=A ) __A = xla_generate(A ,A )[0] self.assertTrue(np.allclose(output[:, :3, :3] ,A ,atol=4E-2 ) ) @require_tf @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Optional[Any] ): super().setUp() __A = "facebook/opt-350m" def UpperCamelCase_ ( self : Optional[int] ): __A = TFOPTForCausalLM.from_pretrained(self.path_model ) __A = GPTaTokenizer.from_pretrained(self.path_model ) __A = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False __A = tokenizer(A ,return_tensors="tf" ,padding=A ,add_special_tokens=A ) __A = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 ) __A = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(A ,A ,atol=1E-4 ) ) __A = tf.function(A ,jit_compile=A ) __A = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 ) self.assertTrue(np.allclose(A ,A ,atol=1E-4 ) ) @require_tf @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @property def 
UpperCamelCase_ ( self : int ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def UpperCamelCase_ ( self : Optional[int] ): __A = "facebook/opt-125m" __A = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] __A = [] __A = GPTaTokenizer.from_pretrained(A ) __A = TFOPTForCausalLM.from_pretrained(A ) for prompt in self.prompts: __A = tokenizer(A ,return_tensors="tf" ).input_ids __A = model.generate(A ,max_length=10 ) __A = tokenizer.batch_decode(A ,skip_special_tokens=A ) predicted_outputs += generated_string self.assertListEqual(A ,A ) def UpperCamelCase_ ( self : int ): __A = "facebook/opt-350m" __A = GPTaTokenizer.from_pretrained(A ) __A = TFOPTForCausalLM.from_pretrained(A ) __A = "left" # use different length sentences to test batching __A = [ "Hello, my dog is a little", "Today, I", ] __A = tokenizer(A ,return_tensors="tf" ,padding=A ) __A = inputs["input_ids"] __A = model.generate(input_ids=A ,attention_mask=inputs["attention_mask"] ) __A = tokenizer(sentences[0] ,return_tensors="tf" ).input_ids __A = model.generate(input_ids=A ) __A = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1] ,tf.intaa ) ) __A = tokenizer(sentences[1] ,return_tensors="tf" ).input_ids __A = model.generate(input_ids=A ,max_length=model.config.max_length - num_paddings ) __A = tokenizer.batch_decode(A ,skip_special_tokens=A ) __A = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=A ) __A = tokenizer.decode(output_padded[0] ,skip_special_tokens=A ) __A = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(A ,A ) self.assertListEqual(A ,[non_padded_sentence, padded_sentence] ) def UpperCamelCase_ ( self : Dict ): __A = "facebook/opt-350m" __A = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] __A = [] __A = GPTaTokenizer.from_pretrained(A ) __A = TFOPTForCausalLM.from_pretrained(A ) for prompt in self.prompts: __A = tokenizer(A ,return_tensors="tf" ).input_ids __A = model.generate(A ,max_length=10 ) __A = tokenizer.batch_decode(A ,skip_special_tokens=A ) predicted_outputs += generated_string self.assertListEqual(A ,A )
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE :str = 'RegNetConfig' # Base docstring SCREAMING_SNAKE_CASE :List[str] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE :Optional[int] = 'facebook/regnet-y-040' SCREAMING_SNAKE_CASE :Any = 'tabby, tabby cat' SCREAMING_SNAKE_CASE :Optional[int] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,): super().__init__(**A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) __A = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : List[Any] ,A : Any ): __A = self.convolution(self.padding(A ) ) __A = self.normalization(A ) __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple ,A : RegNetConfig ,**A : str ): super().__init__(**A ) __A = config.num_channels __A = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,) def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ): __A = shape_list(A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __A = tf.transpose(A ,perm=(0, 2, 3, 1) ) __A = self.embedder(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ): super().__init__(**A ) __A = tf.keras.layers.ConvaD( filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" ) __A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" ) def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ): return self.normalization(self.convolution(A ) ,training=A ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict ,A : int ,A : int ,**A : str ): super().__init__(**A ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) __A = [ tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ), tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ), ] def UpperCamelCase_ ( self : Dict ,A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __A = self.pooler(A ) for layer_module in self.attention: __A = layer_module(A ) __A = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : int ,A : Optional[int] ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ): super().__init__(**A ) __A = in_channels != out_channels or stride != 1 __A = max(1 ,out_channels // config.groups_width ) __A = ( TFRegNetShortCut(A ,stride=A ,name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" ,name="shortcut" ) ) __A = [ TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ), TFRegNetConvLayer( A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ), TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ), TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ), ] __A = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict ,A : Any ): __A = hidden_state for layer_module in self.layers: __A = layer_module(A ) __A = self.shortcut(A ) hidden_state += residual __A = self.activation(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ): super().__init__(**A ) __A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __A = [ # downsampling is done in the first layer with stride of 2 layer(A ,A ,A ,stride=A ,name="layers.0" ), *[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Any ,A : List[str] ): for layer_module in self.layers: __A = layer_module(A ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ): super().__init__(**A ) __A = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) ) __A = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) ) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ): __A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __A = hidden_states + (hidden_state,) __A = stage_module(A ) if output_hidden_states: __A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' snake_case_ = RegNetConfig def __init__( self : int ,A : Optional[int] ,**A : Dict ): super().__init__(**A ) __A = config __A = TFRegNetEmbeddings(A ,name="embedder" ) __A = TFRegNetEncoder(A ,name="encoder" ) __A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" ) @unpack_inputs def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.embedder(A ,training=A ) __A = self.encoder( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = encoder_outputs[0] __A = self.pooler(A ) # Change to NCHW output format have uniformity in the modules __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) __A = tf.transpose(A ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = RegNetConfig snake_case_ = "regnet" snake_case_ = "pixel_values" @property def UpperCamelCase_ ( self : Optional[Any] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ): super().__init__(A ,*A ,**A ) __A = TFRegNetMainLayer(A ,name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ): super().__init__(A ,*A ,**A ) __A = config.num_labels __A = TFRegNetMainLayer(A ,name="regnet" ) # classification head __A = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,): __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet( A ,output_hidden_states=A ,return_dict=A ,training=A ) __A = outputs.pooler_output if return_dict else outputs[1] __A = self.classifier[0](A ) __A = self.classifier[1](A ) __A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A ) if not return_dict: __A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
15
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE :int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Union[str, Any] = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
15
import math def UpperCAmelCase ( a_ , a_ = 0 , a_ = 0 ) -> list: """simple docstring""" __A = end or len(a_ ) for i in range(a_ , a_ ): __A = i __A = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __A = array[temp_index - 1] temp_index -= 1 __A = temp_index_value return array def UpperCAmelCase ( a_ , a_ , a_ ) -> None: # Max Heap """simple docstring""" __A = index __A = 2 * index + 1 # Left Node __A = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __A = left_index if right_index < heap_size and array[largest] < array[right_index]: __A = right_index if largest != index: __A , __A = array[largest], array[index] heapify(a_ , a_ , a_ ) def UpperCAmelCase ( a_ ) -> list: """simple docstring""" __A = len(a_ ) for i in range(n // 2 , -1 , -1 ): heapify(a_ , a_ , a_ ) for i in range(n - 1 , 0 , -1 ): __A , __A = array[0], array[i] heapify(a_ , 0 , a_ ) return array def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" __A = low __A = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __A , __A = array[j], array[i] i += 1 def UpperCAmelCase ( a_ ) -> list: """simple docstring""" if len(a_ ) == 0: return array __A = 2 * math.ceil(math.loga(len(a_ ) ) ) __A = 1_6 return intro_sort(a_ , 0 , len(a_ ) , a_ , a_ ) def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(a_ ) max_depth -= 1 __A = median_of_a(a_ , a_ , start + ((end - start) // 2) + 1 , end - 1 ) __A = partition(a_ , a_ , a_ , a_ ) intro_sort(a_ , a_ , a_ , a_ , a_ ) __A = p return insertion_sort(a_ , a_ , a_ ) if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE :List[Any] = input('Enter numbers separated by a comma : ').strip() SCREAMING_SNAKE_CASE :str = [float(item) for item in user_input.split(',')] print(sort(unsorted))
15
1
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants SCREAMING_SNAKE_CASE :Optional[Any] = Mapping[str, np.ndarray] SCREAMING_SNAKE_CASE :List[str] = Mapping[str, Any] # Is a nested dict. SCREAMING_SNAKE_CASE :int = 0.01 @dataclasses.dataclass(frozen=__SCREAMING_SNAKE_CASE ) class UpperCAmelCase : '''simple docstring''' snake_case_ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. snake_case_ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. snake_case_ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. snake_case_ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. snake_case_ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions snake_case_ = None # Optional remark about the protein. Included as a comment in output PDB # files snake_case_ = None # Templates used to generate this protein (prediction-only) snake_case_ = None # Chain corresponding to each parent snake_case_ = None def UpperCAmelCase ( a_ ) -> Protein: """simple docstring""" __A = r"(\[[A-Z]+\]\n)" __A = [tag.strip() for tag in re.split(a_ , a_ ) if len(a_ ) > 0] __A = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] ) __A = ["N", "CA", "C"] __A = None __A = None __A = None for g in groups: if "[PRIMARY]" == g[0]: __A = g[1][0].strip() for i in range(len(a_ ) ): if seq[i] not in residue_constants.restypes: __A = "X" # FIXME: strings are immutable __A = np.array( [residue_constants.restype_order.get(a_ , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __A = [] for axis in range(3 ): tertiary.append(list(map(a_ , g[1][axis].split() ) ) ) __A = np.array(a_ ) __A = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(a_ ): __A = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __A = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) ) __A = np.zeros( ( len(a_ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(a_ ): __A = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=a_ , atom_mask=a_ , aatype=a_ , residue_index=np.arange(len(a_ ) ) , b_factors=a_ , ) def UpperCAmelCase ( a_ , a_ = 0 ) -> List[str]: """simple docstring""" __A = [] __A = prot.remark if remark is not None: pdb_headers.append(F'''REMARK {remark}''' ) __A = prot.parents __A = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __A = [p for i, p in zip(a_ , a_ ) if i == chain_id] if parents is None or len(a_ ) == 0: __A = ["N/A"] pdb_headers.append(F'''PARENT {' '.join(a_ )}''' ) return pdb_headers def UpperCAmelCase ( a_ , a_ ) -> str: """simple docstring""" __A = [] __A = pdb_str.split("\n" ) __A = prot.remark if remark is not None: out_pdb_lines.append(F'''REMARK {remark}''' ) __A = 42 if prot.parents is not None and len(prot.parents ) > 0: __A = [] if prot.parents_chain_index is not None: __A = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(a_ ) , [] ) parent_dict[str(a_ )].append(a_ ) __A = max([int(a_ ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __A = parent_dict.get(str(a_ ) , ["N/A"] ) parents_per_chain.append(a_ ) else: parents_per_chain.append(list(prot.parents ) ) else: __A = [["N/A"]] def make_parent_line(a_ ) -> str: return F'''PARENT {' '.join(a_ )}''' out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __A = 0 for i, l in enumerate(a_ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(a_ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(a_ ): __A = parents_per_chain[chain_counter] else: __A = ["N/A"] out_pdb_lines.append(make_parent_line(a_ ) ) return "\n".join(a_ ) def UpperCAmelCase ( a_ ) -> str: """simple docstring""" __A = residue_constants.restypes + ["X"] def res_atoa(a_ ) -> str: return residue_constants.restype_atoa.get(restypes[r] , "UNK" ) __A = residue_constants.atom_types __A = [] __A = prot.atom_mask __A = prot.aatype __A = prot.atom_positions __A = prot.residue_index.astype(np.intaa ) __A = prot.b_factors __A = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("Invalid aatypes." ) __A = get_pdb_headers(a_ ) if len(a_ ) > 0: pdb_lines.extend(a_ ) __A = aatype.shape[0] __A = 1 __A = 0 __A = string.ascii_uppercase __A = None # Add all atom sites. for i in range(a_ ): __A = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(a_ , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __A = "ATOM" __A = atom_name if len(a_ ) == 4 else F''' {atom_name}''' __A = "" __A = "" __A = 1.00 __A = atom_name[0] # Protein supports only C, N, O, S, this works. __A = "" __A = "A" if chain_index is not None: __A = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! __A = ( F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}''' F'''{res_name_a:>3} {chain_tag:>1}''' F'''{residue_index[i]:>4}{insertion_code:>1} ''' F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}''' F'''{occupancy:>6.2f}{b_factor:>6.2f} ''' F'''{element:>2}{charge:>2}''' ) pdb_lines.append(a_ ) atom_index += 1 __A = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __A = True __A = chain_index[i + 1] if should_terminate: # Close the chain. __A = "TER" __A = ( F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}''' ) pdb_lines.append(a_ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(a_ , a_ ) ) pdb_lines.append("END" ) pdb_lines.append("" ) return "\n".join(a_ ) def UpperCAmelCase ( a_ ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def UpperCAmelCase ( a_ , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , ) -> Protein: """simple docstring""" return Protein( aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=a_ , remark=a_ , parents=a_ , parents_chain_index=a_ , )
15
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml SCREAMING_SNAKE_CASE :Optional[int] = NewType('DataClass', Any) SCREAMING_SNAKE_CASE :int = NewType('DataClassType', Any) def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" if isinstance(a_ , a_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def UpperCAmelCase ( a_ ) -> Callable[[str], Any]: """simple docstring""" __A = {str(a_ ): choice for choice in choices} return lambda a_ : str_to_choice.get(a_ , a_ ) def UpperCAmelCase ( *, a_ = None , a_ = None , a_ = dataclasses.MISSING , a_ = dataclasses.MISSING , a_ = None , **a_ , ) -> dataclasses.Field: """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __A = {} if aliases is not None: __A = aliases if help is not None: __A = help return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_ ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 def __init__( self : Union[str, Any] ,A : Union[DataClassType, Iterable[DataClassType]] ,**A : List[Any] ): # To make the default appear when using --help if "formatter_class" not in kwargs: __A = ArgumentDefaultsHelpFormatter super().__init__(**A ) if dataclasses.is_dataclass(A ): __A = [dataclass_types] __A = list(A ) for dtype in self.dataclass_types: self._add_dataclass_arguments(A ) @staticmethod def UpperCamelCase_ ( A : ArgumentParser ,A : dataclasses.Field ): __A = f'''--{field.name}''' __A = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,A ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) __A = kwargs.pop("aliases" ,[] ) if isinstance(A ,A ): __A = [aliases] __A = getattr(field.type ,"__origin__" ,field.type ) if origin_type is Union or (hasattr(A ,"UnionType" ) and isinstance(A ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(A ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." f''' Problem encountered in field \'{field.name}\'.''' ) if type(A ) not in field.type.__args__: # filter `str` in Union __A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __A = getattr(field.type ,"__origin__" ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __A = ( field.type.__args__[0] if isinstance(A ,field.type.__args__[1] ) else field.type.__args__[1] ) __A = getattr(field.type ,"__origin__" ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __A = {} if origin_type is Literal or (isinstance(field.type ,A ) and issubclass(field.type ,A )): if origin_type is Literal: __A = field.type.__args__ else: __A = [x.value for x in field.type] __A = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: __A = field.default else: __A = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __A = copy(A ) # Hack because type=bool in argparse does not behave as we want. __A = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. __A = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __A = default # This tells argparse we accept 0 or 1 value after --field_name __A = "?" # This is the value that will get picked if we do --field_name (without value) __A = True elif isclass(A ) and issubclass(A ,A ): __A = field.type.__args__[0] __A = "+" if field.default_factory is not dataclasses.MISSING: __A = field.default_factory() elif field.default is dataclasses.MISSING: __A = True else: __A = field.type if field.default is not dataclasses.MISSING: __A = field.default elif field.default_factory is not dataclasses.MISSING: __A = field.default_factory() else: __A = True parser.add_argument(A ,*A ,**A ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __A = False parser.add_argument(f'''--no_{field.name}''' ,action="store_false" ,dest=field.name ,**A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : DataClassType ): if hasattr(A ,"_argument_group_name" ): __A = self.add_argument_group(dtype._argument_group_name ) else: __A = self try: __A = get_type_hints(A ) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A ): __A = ".".join(map(A ,sys.version_info[:3] ) ) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. Try removing ''' "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(A ): if not field.init: continue __A = type_hints[field.name] self._parse_dataclass_field(A ,A ) def UpperCamelCase_ ( self : Union[str, Any] ,A : List[Any]=None ,A : List[Any]=False ,A : Optional[Any]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __A = [] if args_filename: args_files.append(Path(A ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __A = ArgumentParser() args_file_parser.add_argument(A ,type=A ,action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) __A , __A = args_file_parser.parse_known_args(args=A ) __A = vars(A ).get(args_file_flag.lstrip("-" ) ,A ) if cmd_args_file_paths: args_files.extend([Path(A ) for p in cmd_args_file_paths] ) __A = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __A = file_args + args if args is not None else file_args + sys.argv[1:] __A , __A = self.parse_known_args(args=A ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in vars(A ).items() if k in keys} for k in keys: delattr(A ,A ) __A = dtype(**A ) outputs.append(A ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(A ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def UpperCamelCase_ ( self : Dict ,A : Dict[str, Any] ,A : bool = False ): __A = set(args.keys() ) __A = [] for dtype in self.dataclass_types: __A = {f.name for f in dataclasses.fields(A ) if f.init} __A = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __A = dtype(**A ) outputs.append(A ) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(A )}''' ) return tuple(A ) def UpperCamelCase_ ( self : List[str] ,A : str ,A : bool = False ): with open(Path(A ) ,encoding="utf-8" ) as open_json_file: __A = json.loads(open_json_file.read() ) __A = self.parse_dict(A ,allow_extra_keys=A ) return tuple(A ) def UpperCamelCase_ ( self : int ,A : str ,A : bool = False ): __A = self.parse_dict(yaml.safe_load(Path(A ).read_text() ) ,allow_extra_keys=A ) return tuple(A )
15
1
from math import factorial def UpperCAmelCase ( a_ , a_ ) -> int: """simple docstring""" if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(a_ ) // (factorial(a_ ) * factorial(n - k )) if __name__ == "__main__": print( 'The number of five-card hands possible from a standard', f'''fifty-two card deck is: {combinations(52, 5)}\n''', ) print( 'If a class of 40 students must be arranged into groups of', f'''4 for group projects, there are {combinations(40, 4)} ways''', 'to arrange them.\n', ) print( 'If 10 teams are competing in a Formula One race, there', f'''are {combinations(10, 3)} ways that first, second and''', 'third place can be awarded.', )
15
SCREAMING_SNAKE_CASE :Any = 256 # Modulus to hash a string SCREAMING_SNAKE_CASE :Union[str, Any] = 100_0003 def UpperCAmelCase ( a_ , a_ ) -> bool: """simple docstring""" __A = len(a_ ) __A = len(a_ ) if p_len > t_len: return False __A = 0 __A = 0 __A = 1 # Calculating the hash of pattern and substring of text for i in range(a_ ): __A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus __A = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue __A = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash __A = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: """simple docstring""" # Test 1) __A = "abc1abc12" __A = "alskfjaldsabc1abc1abc12k23adsfabcabc" __A = "alskfjaldsk23adsfabcabc" assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ ) # Test 2) __A = "ABABX" __A = "ABABZABABYABABX" assert rabin_karp(a_ , a_ ) # Test 3) __A = "AAAB" __A = "ABAAAAAB" assert rabin_karp(a_ , a_ ) # Test 4) __A = "abcdabcy" __A = "abcxabcdabxabcdabcdabcy" assert rabin_karp(a_ , a_ ) # Test 5) __A = "Lü" __A = "Lüsai" assert rabin_karp(a_ , a_ ) __A = "Lue" assert not rabin_karp(a_ , a_ ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
15
1
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
15
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel SCREAMING_SNAKE_CASE :Union[str, Any] = False SCREAMING_SNAKE_CASE :Any = True SCREAMING_SNAKE_CASE :Tuple = False if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE :Dict = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } SCREAMING_SNAKE_CASE :Optional[int] = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: SCREAMING_SNAKE_CASE :Dict = reader.read() SCREAMING_SNAKE_CASE :List[str] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel(**config) else: SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel SCREAMING_SNAKE_CASE :List[str] = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) SCREAMING_SNAKE_CASE :List[str] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: SCREAMING_SNAKE_CASE :Optional[Any] = config[key] del config[key] SCREAMING_SNAKE_CASE :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']] SCREAMING_SNAKE_CASE :List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: SCREAMING_SNAKE_CASE :Tuple = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) SCREAMING_SNAKE_CASE :Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue SCREAMING_SNAKE_CASE :List[str] = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: SCREAMING_SNAKE_CASE :List[Any] = param_value SCREAMING_SNAKE_CASE :str = True if not has_changed: SCREAMING_SNAKE_CASE :List[str] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
15
1
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self : Optional[Any] ): __A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A ,"width_multiplier" ) ) class UpperCAmelCase : '''simple docstring''' def __init__( self : List[str] ,A : List[Any] ,A : Optional[int]=13 ,A : Dict=64 ,A : Optional[Any]=2 ,A : Optional[int]=3 ,A : int="swish" ,A : Tuple=3 ,A : Tuple=32 ,A : int=0.1 ,A : Any=0.02 ,A : Any=True ,A : Optional[int]=True ,A : Tuple=10 ,A : Any=None ,A : Any=0.25 ,A : Tuple=0.0 ,A : Optional[int]=0.0 ,): __A = parent __A = batch_size __A = image_size __A = patch_size __A = num_channels __A = make_divisible(5_12 * width_multiplier ,divisor=8 ) __A = hidden_act __A = conv_kernel_size __A = output_stride __A = classifier_dropout_prob __A = use_labels __A = is_training __A = num_labels __A = initializer_range __A = scope __A = width_multiplier __A = ffn_dropout __A = attn_dropout def UpperCamelCase_ ( self : Tuple ): __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] ,self.num_labels ) __A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) __A = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : List[Any] ): return MobileViTVaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,) def UpperCamelCase_ ( self : int ,A : Any ,A : Any ,A : Union[str, Any] ,A : Optional[int] ): __A = MobileViTVaModel(config=A ) model.to(A ) model.eval() __A = model(A ) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self : List[Any] ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : Tuple ): __A = self.num_labels __A = MobileViTVaForImageClassification(A ) model.to(A ) model.eval() __A = model(A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : Union[str, Any] ,A : int ): __A = self.num_labels __A = MobileViTVaForSemanticSegmentation(A ) model.to(A ) model.eval() __A = model(A ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) __A = model(A ,labels=A ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def UpperCamelCase_ ( self : Dict ): __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def UpperCamelCase_ ( self : Union[str, Any] ): __A = MobileViTVaModelTester(self ) __A = MobileViTVaConfigTester(self ,config_class=A ,has_text_modality=A ) def UpperCamelCase_ ( self : Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" ) def UpperCamelCase_ ( self : List[str] ): pass @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" ) def UpperCamelCase_ ( self : List[Any] ): pass @unittest.skip(reason="MobileViTV2 does not output attentions" ) def UpperCamelCase_ ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." ) def UpperCamelCase_ ( self : int ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def UpperCamelCase_ ( self : Optional[int] ): pass def UpperCamelCase_ ( self : Dict ): __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = model_class(A ) __A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A = [*signature.parameters.keys()] __A = ["pixel_values"] self.assertListEqual(arg_names[:1] ,A ) def UpperCamelCase_ ( self : Any ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self : Optional[Any] ): def check_hidden_states_output(A : Dict ,A : Optional[int] ,A : Any ): __A = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): __A = model(**self._prepare_for_class(A ,A ) ) __A = outputs.hidden_states __A = 5 self.assertEqual(len(A ) ,A ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __A = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,) divisor *= 2 self.assertEqual(self.model_tester.output_stride ,divisor // 2 ) __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = True check_hidden_states_output(A ,A ,A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A = True check_hidden_states_output(A ,A ,A ) def UpperCamelCase_ ( self : Any ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCamelCase_ ( self : Optional[Any] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = MobileViTVaModel.from_pretrained(A ) self.assertIsNotNone(A ) def UpperCAmelCase ( ) -> Union[str, Any]: """simple docstring""" __A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self : Any ): return ( MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : str ): __A = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to( A ) __A = self.default_image_processor __A = prepare_img() __A = image_processor(images=A ,return_tensors="pt" ).to(A ) # forward pass with torch.no_grad(): __A = model(**A ) # verify the logits __A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,A ) __A = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : int ): __A = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) __A = model.to(A ) __A = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) __A = prepare_img() __A = image_processor(images=A ,return_tensors="pt" ).to(A ) # forward pass with torch.no_grad(): __A = model(**A ) __A = outputs.logits # verify the logits __A = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape ,A ) __A = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ] ,device=A ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,A ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : List[Any] ): __A = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) __A = model.to(A ) __A = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) __A = prepare_img() __A = image_processor(images=A ,return_tensors="pt" ).to(A ) # forward pass with torch.no_grad(): __A = model(**A ) __A = outputs.logits.detach().cpu() __A = image_processor.post_process_semantic_segmentation(outputs=A ,target_sizes=[(50, 60)] ) __A = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape ,A ) __A = image_processor.post_process_semantic_segmentation(outputs=A ) __A = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape ,A )
15
import argparse import math import traceback import dateutil.parser as date_parser import requests def UpperCAmelCase ( a_ ) -> str: """simple docstring""" __A = {} __A = job["started_at"] __A = job["completed_at"] __A = date_parser.parse(a_ ) __A = date_parser.parse(a_ ) __A = round((end_datetime - start_datetime).total_seconds() / 60.0 ) __A = start __A = end __A = duration_in_min return job_info def UpperCAmelCase ( a_ , a_=None ) -> str: """simple docstring""" __A = None if token is not None: __A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} __A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __A = requests.get(a_ , headers=a_ ).json() __A = {} try: job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} ) __A = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 ) for i in range(a_ ): __A = requests.get(url + F'''&page={i + 2}''' , headers=a_ ).json() job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args() SCREAMING_SNAKE_CASE :Union[str, Any] = get_job_time(args.workflow_run_id) SCREAMING_SNAKE_CASE :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f'''{k}: {v["duration"]}''')
15
1
import math def UpperCAmelCase ( a_ , a_ = 0 , a_ = 0 ) -> list: """simple docstring""" __A = end or len(a_ ) for i in range(a_ , a_ ): __A = i __A = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __A = array[temp_index - 1] temp_index -= 1 __A = temp_index_value return array def UpperCAmelCase ( a_ , a_ , a_ ) -> None: # Max Heap """simple docstring""" __A = index __A = 2 * index + 1 # Left Node __A = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __A = left_index if right_index < heap_size and array[largest] < array[right_index]: __A = right_index if largest != index: __A , __A = array[largest], array[index] heapify(a_ , a_ , a_ ) def UpperCAmelCase ( a_ ) -> list: """simple docstring""" __A = len(a_ ) for i in range(n // 2 , -1 , -1 ): heapify(a_ , a_ , a_ ) for i in range(n - 1 , 0 , -1 ): __A , __A = array[0], array[i] heapify(a_ , 0 , a_ ) return array def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" __A = low __A = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __A , __A = array[j], array[i] i += 1 def UpperCAmelCase ( a_ ) -> list: """simple docstring""" if len(a_ ) == 0: return array __A = 2 * math.ceil(math.loga(len(a_ ) ) ) __A = 1_6 return intro_sort(a_ , 0 , len(a_ ) , a_ , a_ ) def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(a_ ) max_depth -= 1 __A = median_of_a(a_ , a_ , start + ((end - start) // 2) + 1 , end - 1 ) __A = partition(a_ , a_ , a_ , a_ ) intro_sort(a_ , a_ , a_ , a_ , a_ ) __A = p return insertion_sort(a_ , a_ , a_ ) if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE :List[Any] = input('Enter numbers separated by a comma : ').strip() SCREAMING_SNAKE_CASE :str = [float(item) for item in user_input.split(',')] print(sort(unsorted))
15
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" __A = args.pruning_method __A = args.threshold __A = args.model_name_or_path.rstrip("/" ) __A = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) __A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) ) __A = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: __A = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: __A = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": __A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = TopKBinarizer.apply(a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A = ThresholdBinarizer.apply(a_ , a_ , a_ ) __A = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue __A = name[:-6] __A = model[F'''{prefix_}mask_scores'''] __A , __A = -0.1, 1.1 __A = torch.sigmoid(a_ ) __A = s * (r - l) + l __A = s_bar.clamp(min=0.0 , max=1.0 ) __A = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: __A = os.path.join( os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' ) if not os.path.isdir(a_ ): shutil.copytree(a_ , a_ ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) SCREAMING_SNAKE_CASE :str = parser.parse_args() main(args)
15
1
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCamelCase_ ( self : str ): __A = 1 __A = 3 __A = (32, 32) __A = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(A ) return image @property def UpperCamelCase_ ( self : Dict ): torch.manual_seed(0 ) __A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) return model @property def UpperCamelCase_ ( self : Any ): torch.manual_seed(0 ) __A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) return model @property def UpperCamelCase_ ( self : List[str] ): torch.manual_seed(0 ) __A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) return CLIPTextModel(A ) @property def UpperCamelCase_ ( self : Tuple ): def extract(*A : Any ,**A : Optional[int] ): class UpperCAmelCase : '''simple docstring''' def __init__( self : List[Any] ): __A = torch.ones([0] ) def UpperCamelCase_ ( self : List[Any] ,A : Optional[int] ): self.pixel_values.to(A ) return self return Out() return extract def UpperCamelCase_ ( self : Tuple ): __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.dummy_cond_unet __A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="scaled_linear" ,clip_sample=A ,set_alpha_to_one=A ,) __A = self.dummy_vae __A = self.dummy_text_encoder __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk __A = StableDiffusionPipeline( unet=A ,scheduler=A ,vae=A ,text_encoder=A ,tokenizer=A ,safety_checker=A ,feature_extractor=self.dummy_extractor ,) __A = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) __A = "A painting of a squirrel eating a burger" __A = torch.Generator(device=A ).manual_seed(0 ) __A = sd_pipe([prompt] ,generator=A ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ) __A = output.images __A = torch.Generator(device=A ).manual_seed(0 ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,return_dict=A ,)[0] __A = image[0, -3:, -3:, -1] __A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self : Any ): __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.dummy_cond_unet __A = PNDMScheduler(skip_prk_steps=A ) __A = self.dummy_vae __A = self.dummy_text_encoder __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk __A = StableDiffusionPipeline( unet=A ,scheduler=A ,vae=A ,text_encoder=A ,tokenizer=A ,safety_checker=A ,feature_extractor=self.dummy_extractor ,) __A = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) __A = "A painting of a squirrel eating a burger" __A = torch.Generator(device=A ).manual_seed(0 ) __A = sd_pipe([prompt] ,generator=A ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ) __A = output.images __A = torch.Generator(device=A ).manual_seed(0 ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,return_dict=A ,)[0] __A = image[0, -3:, -3:, -1] __A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self : Optional[int] ): __A = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" ,safety_checker=A ) assert isinstance(A ,A ) assert isinstance(pipe.scheduler ,A ) assert pipe.safety_checker is None __A = pipe("example prompt" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) __A = StableDiffusionPipeline.from_pretrained(A ) # sanity check that the pipeline still works assert pipe.safety_checker is None __A = pipe("example prompt" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" ) def UpperCamelCase_ ( self : Dict ): __A = self.dummy_cond_unet __A = PNDMScheduler(skip_prk_steps=A ) __A = self.dummy_vae __A = self.dummy_text_encoder __A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 __A = unet.half() __A = vae.half() __A = bert.half() # make sure here that pndm scheduler skips prk __A = StableDiffusionPipeline( unet=A ,scheduler=A ,vae=A ,text_encoder=A ,tokenizer=A ,safety_checker=A ,feature_extractor=self.dummy_extractor ,) __A = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) __A = "A painting of a squirrel eating a burger" __A = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Dict ): __A = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ,safety_checker=A ) __A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __A = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) __A = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) __A = 40_03_66_03_46 __A = 7 # without safety guidance (sld_guidance_scale = 0) __A = torch.manual_seed(A ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=A ,num_inference_steps=50 ,output_type="np" ,width=5_12 ,height=5_12 ,sld_guidance_scale=0 ,) __A = output.images __A = image[0, -3:, -3:, -1] __A = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __A = torch.manual_seed(A ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=A ,num_inference_steps=50 ,output_type="np" ,width=5_12 ,height=5_12 ,sld_guidance_scale=20_00 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) __A = output.images __A = image[0, -3:, -3:, -1] __A = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self : List[str] ): __A = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ,safety_checker=A ) __A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __A = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) __A = "padme amidala taking a bath artwork, safe for work, no nudity" __A = 27_34_97_17_55 __A = 7 __A = torch.manual_seed(A ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=A ,num_inference_steps=50 ,output_type="np" ,width=5_12 ,height=5_12 ,sld_guidance_scale=0 ,) __A = output.images __A = image[0, -3:, -3:, -1] __A = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __A = torch.manual_seed(A ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=A ,num_inference_steps=50 ,output_type="np" ,width=5_12 ,height=5_12 ,sld_guidance_scale=20_00 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) __A = output.images __A = image[0, -3:, -3:, -1] __A = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self : Dict ): __A = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) __A = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) __A = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) __A = 10_44_35_52_34 __A = 12 __A = torch.manual_seed(A ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=A ,num_inference_steps=50 ,output_type="np" ,width=5_12 ,height=5_12 ,sld_guidance_scale=0 ,) __A = output.images __A = image[0, -3:, -3:, -1] __A = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __A = torch.manual_seed(A ) __A = sd_pipe( [prompt] ,generator=A ,guidance_scale=A ,num_inference_steps=50 ,output_type="np" ,width=5_12 ,height=5_12 ,sld_guidance_scale=20_00 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) __A = output.images __A = image[0, -3:, -3:, -1] __A = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
15
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'}

SCREAMING_SNAKE_CASE :Union[str, Any] = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    }
}

SCREAMING_SNAKE_CASE :int = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = ["input_ids", "attention_mask"]
    snake_case_ = []

    def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,):
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token

        __A = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)

        __A = vocab_file
        __A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A )

    @property
    def UpperCamelCase_ ( self : List[str] ):
        return self.sp_model.get_piece_size()

    def UpperCamelCase_ ( self : Optional[Any] ):
        __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Optional[int] ):
        __A = self.__dict__.copy()
        __A = None
        return state

    def __setstate__( self : str ,A : Optional[Any] ):
        __A = d

        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            __A = {}

        __A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCamelCase_ ( self : Any ,A : str ):
        return self.sp_model.encode(A ,out_type=A )

    def UpperCamelCase_ ( self : List[str] ,A : Tuple ):
        return self.sp_model.piece_to_id(A )

    def UpperCamelCase_ ( self : List[Any] ,A : Tuple ):
        __A = self.sp_model.IdToPiece(A )
        return token

    def UpperCamelCase_ ( self : List[Any] ,A : int ):
        __A = []
        __A = ""
        __A = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(A ) + token
                __A = True
                __A = []
            else:
                current_sub_tokens.append(A )
                __A = False
        out_string += self.sp_model.decode(A )
        return out_string.strip()

    def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,):
        __A = kwargs.pop("use_source_tokenizer" ,A )

        __A = self.convert_ids_to_tokens(A ,skip_special_tokens=A )

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        __A = []
        __A = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(A ) )
                    __A = []
                sub_texts.append(A )
            else:
                current_sub_text.append(A )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(A ) )

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            __A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) )
        else:
            __A = "".join(A )

        __A = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            __A = self.clean_up_tokenization(A )
            return clean_text
        else:
            return text

    def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ):
        if not os.path.isdir(A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __A = os.path.join(
            A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,A )
        elif not os.path.isfile(self.vocab_file ):
            with open(A ,"wb" ) as fi:
                __A = self.sp_model.serialized_model_proto()
                fi.write(A )

        return (out_vocab_file,)

    def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __A = [self.cls_token_id]
        __A = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )

        if token_ids_a is None:
            return [1] + ([0] * len(A )) + [1]
        return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]

    def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ):
        __A = [self.sep_token_id]
        __A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
15
1
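A minimal usage sketch for the sentencepiece tokenizer in the row above (not part of the dataset row itself; it assumes the obfuscated class corresponds to transformers' BigBirdTokenizer and that the google/bigbird-roberta-base checkpoint named in its pretrained map is reachable):

# Hypothetical round-trip through the tokenizer defined above; class and
# checkpoint names are assumptions inferred from the row's pretrained map.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Hello world!").input_ids               # [CLS] ... [SEP] via build_inputs_with_special_tokens
text = tokenizer.decode(ids, skip_special_tokens=True)  # sentencepiece decode path shown in the row
print(ids, text)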
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE :Tuple = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    snake_case_ = "roberta"

    def __init__( self : Optional[int] ,A : Optional[int]=5_02_65 ,A : Union[str, Any]=7_68 ,A : Tuple=12 ,A : Any=12 ,A : List[Any]=30_72 ,A : str="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Dict=5_12 ,A : Union[str, Any]=2 ,A : Tuple=0.02 ,A : Dict=1E-12 ,A : Optional[Any]=1 ,A : int=0 ,A : Union[str, Any]=2 ,A : str="absolute" ,A : Optional[int]=True ,A : Dict=None ,**A : Optional[Any] ,):
        super().__init__(pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,**A )

        __A = vocab_size
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = hidden_act
        __A = intermediate_size
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_vocab_size
        __A = initializer_range
        __A = layer_norm_eps
        __A = position_embedding_type
        __A = use_cache
        __A = classifier_dropout


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    @property
    def UpperCamelCase_ ( self : List[Any] ):
        if self.task == "multiple-choice":
            __A = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            __A = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
15
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    SCREAMING_SNAKE_CASE :Any = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE :int = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def UpperCAmelCase ( a_ ) -> Optional[Any]:
    """simple docstring"""
    __A = (images / 2 + 0.5).clamp(0 , 1 )
    __A = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    __A = numpy_to_pil(a_ )
    return images


def UpperCAmelCase ( a_ ) -> int:
    """simple docstring"""
    if images.ndim == 3:
        __A = images[None, ...]
    __A = (images * 2_5_5).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        __A = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        __A = [Image.fromarray(a_ ) for image in images]

    return pil_images
15
1
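A short sketch of how the configuration class in the row above is typically instantiated (an assumption: the obfuscated class maps to transformers' RobertaConfig, whose defaults mirror roberta-base):

# Assumes the obfuscated config class corresponds to transformers.RobertaConfig.
from transformers import RobertaConfig

config = RobertaConfig()                 # defaults: vocab_size=50265, hidden_size=768, 12 layers
config.num_attention_heads = 16          # fields set in __init__ are plain attributes
print(config.to_dict()["hidden_size"])   # 768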
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


SCREAMING_SNAKE_CASE :List[str] = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE :Any = [
        'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwinForImageClassification',
        'SwinForMaskedImageModeling',
        'SwinModel',
        'SwinPreTrainedModel',
        'SwinBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE :Tuple = [
        'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSwinForImageClassification',
        'TFSwinForMaskedImageModeling',
        'TFSwinModel',
        'TFSwinPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
15
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE :List[Any] = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    snake_case_ = "yolos"

    def __init__( self : Any ,A : Optional[Any]=7_68 ,A : Dict=12 ,A : Any=12 ,A : str=30_72 ,A : Any="gelu" ,A : str=0.0 ,A : List[str]=0.0 ,A : Dict=0.02 ,A : int=1E-12 ,A : Tuple=[5_12, 8_64] ,A : List[Any]=16 ,A : str=3 ,A : str=True ,A : Any=1_00 ,A : Dict=True ,A : Dict=False ,A : Tuple=1 ,A : Union[str, Any]=5 ,A : Optional[Any]=2 ,A : Union[str, Any]=5 ,A : int=2 ,A : int=0.1 ,**A : List[str] ,):
        super().__init__(**A )

        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = initializer_range
        __A = layer_norm_eps
        __A = image_size
        __A = patch_size
        __A = num_channels
        __A = qkv_bias
        __A = num_detection_tokens
        __A = use_mid_position_embeddings
        __A = auxiliary_loss
        # Hungarian matcher
        __A = class_cost
        __A = bbox_cost
        __A = giou_cost
        # Loss coefficients
        __A = bbox_loss_coefficient
        __A = giou_loss_coefficient
        __A = eos_coefficient


class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    snake_case_ = version.parse("1.11" )

    @property
    def UpperCamelCase_ ( self : str ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def UpperCamelCase_ ( self : List[Any] ):
        return 1E-4

    @property
    def UpperCamelCase_ ( self : Optional[Any] ):
        return 12
15
1
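A sketch of the lazy-import pattern used in the Swin __init__ file above, assuming that file lives at transformers.models.swin: importing the package stays cheap, and the heavy submodules load on first attribute access through _LazyModule.

# Assumes the lazy module above is installed as transformers.models.swin.
import transformers

config = transformers.SwinConfig()  # resolved through _LazyModule on first access
print(type(config).__module__)      # transformers.models.swin.configuration_swin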
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
SCREAMING_SNAKE_CASE :Optional[Any] = 2_9979_2458
# Symbols
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Tuple = symbols('ct x y z')


def UpperCAmelCase ( a_ ) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!" )
    return velocity / c


def UpperCAmelCase ( a_ ) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(a_ ) ** 2 )


def UpperCAmelCase ( a_ ) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(a_ ), -gamma(a_ ) * beta(a_ ), 0, 0],
            [-gamma(a_ ) * beta(a_ ), gamma(a_ ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )


def UpperCAmelCase ( a_ , a_ = None ) -> np.ndarray:
    """simple docstring"""
    if event is None:
        __A = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(a_ ) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    SCREAMING_SNAKE_CASE :Any = transform(2997_9245)
    print('Example of four vector: ')
    print(f'''ct\' = {four_vector[0]}''')
    print(f'''x\' = {four_vector[1]}''')
    print(f'''y\' = {four_vector[2]}''')
    print(f'''z\' = {four_vector[3]}''')

    # Substitute symbols with numerical values
    SCREAMING_SNAKE_CASE :Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
    SCREAMING_SNAKE_CASE :Optional[Any] = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'''\n{numerical_vector}''')
15
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings, add_end_docstrings, add_start_docstrings,
    add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings,
)
from .generic import (
    ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map,
    cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor,
    is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype,
    is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose,
    working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError,
    PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path,
    define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo,
    get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache,
    send_example_telemetry, try_to_load_from_cache,
)
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH,
    DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import,
    get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available,
    is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available,
    is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook,
    is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available,
    is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available,
    is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available,
    is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available,
    is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available,
    is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled,
    is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available,
    is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available,
    is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available,
    is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available,
    is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available, is_torch_compile_available,
    is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available,
    is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available,
    is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available,
    is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends,
    torch_only_method,
)


SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin'
SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json'
SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors'
SCREAMING_SNAKE_CASE :str = 'tf_model.h5'
SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json'
SCREAMING_SNAKE_CASE :str = 'model.ckpt'
SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack'
SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json'
SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors'
SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json'
SCREAMING_SNAKE_CASE :str = 'config.json'
SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json'
SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME
SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json'
SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json'

SCREAMING_SNAKE_CASE :Optional[int] = '▁'
SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

SCREAMING_SNAKE_CASE :str = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.

SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def UpperCAmelCase ( a_ ) -> Dict:
    """simple docstring"""
    if version.parse(a_ ) < version.parse(a_ ):
        if "dev" in min_version:
            __A = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            __A = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
15
1
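A hedged usage sketch for the version-check helper at the end of the utils file above (an assumption: the obfuscated function is exported as transformers.utils.check_min_version, which is how the library's example scripts invoke it):

# Assumes the last function in the row above is exported as check_min_version.
from transformers.utils import check_min_version

check_min_version("4.21.0")  # raises ImportError when the installed transformers is older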