Dataset schema (column name, dtype, observed range):

| Column | Type | Observed range |
| --- | --- | --- |
| code | string | lengths 81 to 54k characters |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k characters |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
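Each row pairs an obfuscated `code` sample with a `style_context` sample, an integer style score for each, and a binary `label`. As a minimal sketch of how rows with this schema could be inspected, assuming the data is published as a Hugging Face dataset (the repository id below is a hypothetical placeholder, not the real dataset name):

```python
# Minimal sketch: load a dataset with the schema above and inspect one row.
# Assumption: the data is hosted as a Hugging Face dataset; "user/code-style-pairs"
# is a hypothetical placeholder repo id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical repo id

row = ds[0]
print(row["code"][:200])               # obfuscated source sample
print(row["code_codestyle"])           # integer style score for `code`
print(row["style_context"][:200])      # paired context sample
print(row["style_context_codestyle"])  # integer style score for the context
print(row["label"])                    # binary label (0 or 1)
```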
'''simple docstring'''
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
    __SCREAMING_SNAKE_CASE = 0
    __SCREAMING_SNAKE_CASE = 1
    __SCREAMING_SNAKE_CASE = 2


@add_end_docstrings(_UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    __SCREAMING_SNAKE_CASE = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__( self : str , *a_ : Optional[Any] , **a_ : str ):
        """simple docstring"""
        super().__init__(*a_ , **a_ )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            __snake_case = None
            if self.model.config.prefix is not None:
                __snake_case = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                __snake_case = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                __snake_case , __snake_case , __snake_case = self._sanitize_parameters(prefix=a_ , **self._forward_params )
                __snake_case = {**self._preprocess_params, **preprocess_params}
                __snake_case = {**self._forward_params, **forward_params}

    def A ( self : Dict , a_ : int=None , a_ : str=None , a_ : Tuple=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Dict=None , a_ : Union[str, Any]=None , a_ : int=None , **a_ : Tuple , ):
        """simple docstring"""
        __snake_case = {}
        if prefix is not None:
            __snake_case = prefix
        if prefix:
            __snake_case = self.tokenizer(
                a_ , padding=a_ , add_special_tokens=a_ , return_tensors=self.framework )
            __snake_case = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            __snake_case = handle_long_generation

        preprocess_params.update(a_ )

        __snake_case = generate_kwargs

        __snake_case = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            __snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            __snake_case = ReturnType.TENSORS
        if return_type is not None:
            __snake_case = return_type
        if clean_up_tokenization_spaces is not None:
            __snake_case = clean_up_tokenization_spaces

        if stop_sequence is not None:
            __snake_case = self.tokenizer.encode(a_ , add_special_tokens=a_ )
            if len(a_ ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            __snake_case = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def A ( self : str , *a_ : str , **a_ : Optional[Any] ):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*a_ , **a_ )

    def __call__( self : Union[str, Any] , a_ : Dict , **a_ : str ):
        """simple docstring"""
        return super().__call__(a_ , **a_ )

    def A ( self : Union[str, Any] , a_ : Dict , a_ : List[Any]="" , a_ : List[Any]=None , **a_ : Any ):
        """simple docstring"""
        __snake_case = self.tokenizer(
            prefix + prompt_text , padding=a_ , add_special_tokens=a_ , return_tensors=self.framework )
        __snake_case = prompt_text

        if handle_long_generation == "hole":
            __snake_case = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                __snake_case = generate_kwargs["max_new_tokens"]
            else:
                __snake_case = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                __snake_case = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                __snake_case = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    __snake_case = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def A ( self : Tuple , a_ : Dict , **a_ : Optional[Any] ):
        """simple docstring"""
        __snake_case = model_inputs["input_ids"]
        __snake_case = model_inputs.get("attention_mask" , a_ )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            __snake_case = None
            __snake_case = None
            __snake_case = 1
        else:
            __snake_case = input_ids.shape[0]
        __snake_case = model_inputs.pop("prompt_text" )

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        __snake_case = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            __snake_case = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                __snake_case = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            __snake_case = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        __snake_case = self.model.generate(input_ids=a_ , attention_mask=a_ , **a_ )
        __snake_case = generated_sequence.shape[0]
        if self.framework == "pt":
            __snake_case = generated_sequence.reshape(a_ , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            __snake_case = tf.reshape(a_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def A ( self : int , a_ : int , a_ : Any=ReturnType.FULL_TEXT , a_ : Optional[Any]=True ):
        """simple docstring"""
        __snake_case = model_outputs["generated_sequence"][0]
        __snake_case = model_outputs["input_ids"]
        __snake_case = model_outputs["prompt_text"]
        __snake_case = generated_sequence.numpy().tolist()
        __snake_case = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                __snake_case = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                __snake_case = self.tokenizer.decode(
                    a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    __snake_case = 0
                else:
                    __snake_case = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ , ) )
                if return_type == ReturnType.FULL_TEXT:
                    __snake_case = prompt_text + text[prompt_length:]
                else:
                    __snake_case = text[prompt_length:]
                __snake_case = {"generated_text": all_text}
            records.append(a_ )
        return records
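The row above appears to be an obfuscated variant of the `transformers` text-generation pipeline. For orientation, a minimal usage sketch through the public `pipeline` factory; the model id `gpt2` and the prompt are illustrative choices, not taken from the dataset:

```python
# Minimal usage sketch for a causal-LM text-generation pipeline (assumes the
# transformers library plus a backend such as PyTorch is installed; "gpt2" is
# an illustrative model id, not one referenced in this dataset).
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

# return_full_text=False corresponds to ReturnType.NEW_TEXT in the class above,
# so only the newly generated continuation is returned.
outputs = generator("Hello, world", max_new_tokens=20, return_full_text=False)
print(outputs[0]["generated_text"])
```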
680
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )

    __snake_case = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(_UpperCAmelCase )

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
1
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer" )

    __snake_case = [True] * (num + 1)

    __snake_case = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , _UpperCAmelCase ):
                __snake_case = False
        p += 1

    return [prime for prime in range(2 , num + 1 ) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    a : Union[str, Any] = int(input('''Enter a positive integer: ''').strip())
    print(prime_sieve_eratosthenes(user_num))
680
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    __snake_case = len(bin(_UpperCAmelCase )[3:] )
    __snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
    __snake_case = (
        (
            "1"
            + "0" * (binary_number_length - len(_UpperCAmelCase ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
1
'''simple docstring'''
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


a : Optional[Any] = 8


def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=BITS ) -> str:
    __snake_case = x.device

    __snake_case = (x * 2_55).int().clamp(0 , 2_55 )

    __snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase )
    __snake_case = rearrange(_UpperCAmelCase , "d -> d 1 1" )
    __snake_case = rearrange(_UpperCAmelCase , "b c h w -> b c 1 h w" )

    __snake_case = ((x & mask) != 0).float()
    __snake_case = rearrange(_UpperCAmelCase , "b c d h w -> b (c d) h w" )
    __snake_case = bits * 2 - 1
    return bits


def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=BITS ) -> Optional[Any]:
    __snake_case = x.device

    __snake_case = (x > 0).int()
    __snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase , dtype=torch.intaa )

    __snake_case = rearrange(_UpperCAmelCase , "d -> d 1 1" )
    __snake_case = rearrange(_UpperCAmelCase , "b (c d) h w -> b c d h w" , d=8 )
    __snake_case = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
    return (dec / 2_55).clamp(0.0 , 1.0 )


def __UpperCAmelCase ( self : Union[str, Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    __snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    __snake_case = self.alphas_cumprod[timestep]
    __snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    __snake_case = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    __snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    __snake_case = self.bit_scale
    if self.config.clip_sample:
        __snake_case = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    __snake_case = self._get_variance(_UpperCAmelCase , _UpperCAmelCase )
    __snake_case = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        __snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    __snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    __snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        __snake_case = model_output.device if torch.is_tensor(_UpperCAmelCase ) else "cpu"
        __snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_UpperCAmelCase ).to(_UpperCAmelCase )
        __snake_case = self._get_variance(_UpperCAmelCase , _UpperCAmelCase ) ** 0.5 * eta * noise

        __snake_case = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )


def __UpperCAmelCase ( self : Union[str, Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Dict="epsilon" , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    __snake_case = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        __snake_case , __snake_case = torch.split(_UpperCAmelCase , sample.shape[1] , dim=1 )
    else:
        __snake_case = None

    # 1. compute alphas, betas
    __snake_case = self.alphas_cumprod[t]
    __snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
    __snake_case = 1 - alpha_prod_t
    __snake_case = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        __snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        __snake_case = model_output
    else:
        raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )

    # 3. Clip "predicted x_0"
    __snake_case = self.bit_scale
    if self.config.clip_sample:
        __snake_case = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    __snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    __snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    __snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    __snake_case = 0
    if t > 0:
        __snake_case = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_UpperCAmelCase ).to(model_output.device )
        __snake_case = (self._get_variance(_UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise

    __snake_case = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )


class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    def __init__( self : Optional[Any] , a_ : UNetaDConditionModel , a_ : Union[DDIMScheduler, DDPMScheduler] , a_ : Optional[float] = 1.0 , ):
        """simple docstring"""
        super().__init__()
        __snake_case = bit_scale
        __snake_case = (
            ddim_bit_scheduler_step if isinstance(a_ , a_ ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=a_ , scheduler=a_ )

    @torch.no_grad()
    def __call__( self : Dict , a_ : Optional[int] = 256 , a_ : Optional[int] = 256 , a_ : Optional[int] = 50 , a_ : Optional[torch.Generator] = None , a_ : Optional[int] = 1 , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : int , ):
        """simple docstring"""
        __snake_case = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=a_ , )
        __snake_case = decimal_to_bits(a_ ) * self.bit_scale
        __snake_case = latents.to(self.device )

        self.scheduler.set_timesteps(a_ )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            __snake_case = self.unet(a_ , a_ ).sample

            # compute the previous noisy sample x_t -> x_t-1
            __snake_case = self.scheduler.step(a_ , a_ , a_ ).prev_sample

        __snake_case = bits_to_decimal(a_ )

        if output_type == "pil":
            __snake_case = self.numpy_to_pil(a_ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=a_ )
680
'''simple docstring'''
from timeit import timeit


def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    __snake_case = 0
    while number:
        number &= number - 1
        result += 1
    return result


def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    __snake_case = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def __UpperCAmelCase ( ) -> None:
    def do_benchmark(_UpperCAmelCase : int ) -> None:
        __snake_case = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
        __snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
        __snake_case = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
        print(F'''timeit() runs in {timing} seconds''' )

    for number in (25, 37, 58, 0):
        do_benchmark(_UpperCAmelCase )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
680
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


a : Tuple = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    def __init__( self : Dict , *a_ : Dict , **a_ : Any ):
        """simple docstring"""
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , a_ , )
        super().__init__(*a_ , **a_ )
680
'''simple docstring'''
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''


@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @cached_property
    def A ( self : Union[str, Any] ):
        """simple docstring"""
        return AutoConfig.from_pretrained(a_ )

    def A ( self : str ):
        """simple docstring"""
        __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def A ( self : Optional[Any] ):
        """simple docstring"""
        __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )

    def A ( self : Dict ):
        """simple docstring"""
        __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def A ( self : Optional[int] ):
        """simple docstring"""
        __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def A ( self : Dict ):
        """simple docstring"""
        with self.assertRaises(a_ ):
            create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
680
1
'''simple docstring'''
from random import shuffle

import tensorflow as tf
from numpy import array


def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> str:
    __snake_case = int(_UpperCAmelCase )
    assert noofclusters < len(_UpperCAmelCase )

    # Find out the dimensionality
    __snake_case = len(vectors[0] )

    # Will help select random centroids from among the available vectors
    __snake_case = list(range(len(_UpperCAmelCase ) ) )
    shuffle(_UpperCAmelCase )

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    __snake_case = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        __snake_case = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        __snake_case = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(_UpperCAmelCase )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        __snake_case = tf.placeholder("float64" , [dim] )
        __snake_case = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(_UpperCAmelCase , _UpperCAmelCase ) )

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        __snake_case = [tf.Variable(0 ) for i in range(len(_UpperCAmelCase ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        __snake_case = tf.placeholder("int32" )
        __snake_case = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(_UpperCAmelCase , _UpperCAmelCase ) )

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        __snake_case = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        __snake_case = tf.reduce_mean(_UpperCAmelCase , 0 )

        ##Node for computing Euclidean distances
        # Placeholders for input
        __snake_case = tf.placeholder("float" , [dim] )
        __snake_case = tf.placeholder("float" , [dim] )
        __snake_case = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_UpperCAmelCase , _UpperCAmelCase ) , 2 ) ) )

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        __snake_case = tf.placeholder("float" , [noofclusters] )
        __snake_case = tf.argmin(_UpperCAmelCase , 0 )

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        __snake_case = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(_UpperCAmelCase )

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        __snake_case = 1_00
        for _ in range(_UpperCAmelCase ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(_UpperCAmelCase ) ):
                __snake_case = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
                __snake_case = [
                    sess.run(_UpperCAmelCase , feed_dict={va: vect, va: sess.run(_UpperCAmelCase )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                __snake_case = sess.run(
                    _UpperCAmelCase , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(_UpperCAmelCase ):
                # Collect all the vectors assigned to this cluster
                __snake_case = [
                    vectors[i]
                    for i in range(len(_UpperCAmelCase ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                __snake_case = sess.run(
                    _UpperCAmelCase , feed_dict={mean_input: array(_UpperCAmelCase )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )

        # Return centroids and assignments
        __snake_case = sess.run(_UpperCAmelCase )
        __snake_case = sess.run(_UpperCAmelCase )
    return centroids, assignments
680
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors a : Any = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """sequence-classification""" def __init__( self : List[str] , a_ : str ): """simple docstring""" if type(a_ ) == dict: __snake_case = Namespace(**a_ ) __snake_case = glue_output_modes[hparams.task] __snake_case = glue_tasks_num_labels[hparams.task] super().__init__(a_ , a_ , self.mode ) def A ( self : Union[str, Any] , **a_ : List[Any] ): """simple docstring""" return self.model(**a_ ) def A ( self : int , a_ : Optional[Any] , a_ : int ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __snake_case = self(**a_ ) __snake_case = outputs[0] __snake_case = self.trainer.lr_schedulers[0]["scheduler"] __snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def A ( self : List[str] ): """simple docstring""" __snake_case = self.hparams __snake_case = processors[args.task]() __snake_case = processor.get_labels() for mode in ["train", "dev"]: __snake_case = self._feature_file(a_ ) if os.path.exists(a_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , a_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) __snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) __snake_case = convert_examples_to_features( a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , a_ ) torch.save(a_ , a_ ) def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ): """simple docstring""" __snake_case = "dev" if mode == "test" else mode __snake_case = self._feature_file(a_ ) logger.info("Loading features from cached file %s" , a_ ) __snake_case = torch.load(a_ ) __snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , ) def A ( self : int , a_ : List[str] , a_ : Tuple ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", 
"albert"] else None __snake_case = self(**a_ ) __snake_case , __snake_case = outputs[:2] __snake_case = logits.detach().cpu().numpy() __snake_case = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A ( self : Dict , a_ : Optional[int] ): """simple docstring""" __snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() __snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __snake_case = np.argmax(a_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": __snake_case = np.squeeze(a_ ) __snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 ) __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )} __snake_case = dict(results.items() ) __snake_case = results return ret, preds_list, out_label_list def A ( self : Tuple , a_ : list ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A ( self : int , a_ : Tuple ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A ( a_ : str , a_ : Any ): """simple docstring""" BaseTransformer.add_model_specific_args(a_ , a_ ) parser.add_argument( "--max_seq_length" , default=128 , type=a_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = argparse.ArgumentParser() add_generic_args(_UpperCAmelCase , os.getcwd() ) __snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) __snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __snake_case = os.path.join( "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __snake_case = GLUETransformer(_UpperCAmelCase ) __snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) ) __snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCAmelCase ) if __name__ == "__main__": main()
680
1
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel a : Tuple = logging.getLogger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> int: # save results if os.path.exists(_UpperCAmelCase ): if os.path.exists(os.path.join(_UpperCAmelCase , "config.json" ) ) and os.path.isfile( os.path.join(_UpperCAmelCase , "config.json" ) ): os.remove(os.path.join(_UpperCAmelCase , "config.json" ) ) if os.path.exists(os.path.join(_UpperCAmelCase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(_UpperCAmelCase , "pytorch_model.bin" ) ): os.remove(os.path.join(_UpperCAmelCase , "pytorch_model.bin" ) ) else: os.makedirs(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=False ) -> Any: __snake_case = 2 if unlogit: __snake_case = torch.pow(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = p * torch.log(_UpperCAmelCase ) __snake_case = 0 return -plogp.sum(dim=-1 ) def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> List[str]: logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(_UpperCAmelCase ) ) ) ) for row in range(len(_UpperCAmelCase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : int=False ) -> Any: __snake_case , __snake_case = model.config.num_hidden_layers, model.config.num_attention_heads __snake_case = torch.zeros(_UpperCAmelCase , _UpperCAmelCase ).to(args.device ) __snake_case = torch.zeros(_UpperCAmelCase , _UpperCAmelCase ).to(args.device ) if head_mask is None: __snake_case = torch.ones(_UpperCAmelCase , _UpperCAmelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=_UpperCAmelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: __snake_case = None __snake_case = 0.0 __snake_case = 0.0 for step, inputs in enumerate(tqdm(_UpperCAmelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): __snake_case = tuple(t.to(args.device ) for t in inputs ) ((__snake_case) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) __snake_case = model(_UpperCAmelCase , labels=_UpperCAmelCase , head_mask=_UpperCAmelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) __snake_case , __snake_case , __snake_case = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_UpperCAmelCase ): __snake_case = entropy(attn.detach() , _UpperCAmelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_UpperCAmelCase ).float().detach().sum().data # Normalize 
attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: __snake_case = 2 __snake_case = torch.pow(torch.pow(_UpperCAmelCase , _UpperCAmelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0 if not args.dont_normalize_global_importance: __snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(_UpperCAmelCase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(_UpperCAmelCase ) logger.info("Head ranked by importance scores" ) __snake_case = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) __snake_case = torch.arange( head_importance.numel() , device=args.device ) __snake_case = head_ranks.view_as(_UpperCAmelCase ) print_ad_tensor(_UpperCAmelCase ) return attn_entropy, head_importance, total_loss def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> List[Any]: __snake_case , __snake_case , __snake_case = compute_heads_importance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase ) __snake_case = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , _UpperCAmelCase , original_score * args.masking_threshold ) __snake_case = torch.ones_like(_UpperCAmelCase ) __snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) __snake_case = original_score while current_score >= original_score * args.masking_threshold: __snake_case = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads __snake_case = float("Inf" ) __snake_case = head_importance.view(-1 ).sort()[1] if len(_UpperCAmelCase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads __snake_case = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) __snake_case = new_head_mask.view(-1 ) __snake_case = 0.0 __snake_case = new_head_mask.view_as(_UpperCAmelCase ) __snake_case = new_head_mask.clone().detach() print_ad_tensor(_UpperCAmelCase ) # Compute metric and head importance again __snake_case , __snake_case , __snake_case = compute_heads_importance( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase , head_mask=_UpperCAmelCase ) __snake_case = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , _UpperCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , ) logger.info("Final head mask" ) print_ad_tensor(_UpperCAmelCase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> str: __snake_case = datetime.now() __snake_case , __snake_case , __snake_case = compute_heads_importance( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase , compute_importance=_UpperCAmelCase , head_mask=_UpperCAmelCase ) __snake_case = 1 / loss __snake_case = datetime.now() - before_time __snake_case = sum(p.numel() for p in model.parameters() ) __snake_case = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer 
in range(len(_UpperCAmelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __snake_case = [ v, ] assert sum(len(_UpperCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_UpperCAmelCase ) __snake_case = sum(p.numel() for p in model.parameters() ) __snake_case = datetime.now() __snake_case , __snake_case , __snake_case = compute_heads_importance( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase , compute_importance=_UpperCAmelCase , head_mask=_UpperCAmelCase , actually_pruned=_UpperCAmelCase , ) __snake_case = 1 / loss __snake_case = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _UpperCAmelCase , _UpperCAmelCase , pruned_num_params / original_num_params * 1_00 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , _UpperCAmelCase , _UpperCAmelCase ) logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_00 ) save_model(_UpperCAmelCase , args.output_dir ) def __UpperCAmelCase ( ) -> str: __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=_UpperCAmelCase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=_UpperCAmelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=_UpperCAmelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=_UpperCAmelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=_UpperCAmelCase , help="Amount to heads to masking at each masking step." 
) parser.add_argument("--metric_name" , default="acc" , type=_UpperCAmelCase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=1_28 , type=_UpperCAmelCase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=_UpperCAmelCase , help="Batch size." ) parser.add_argument("--seed" , type=_UpperCAmelCase , default=42 ) parser.add_argument("--local_rank" , type=_UpperCAmelCase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=_UpperCAmelCase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=_UpperCAmelCase , default="" , help="Can be used for distant debugging." ) __snake_case = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCAmelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: __snake_case = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) __snake_case = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) __snake_case = torch.device("cuda" , args.local_rank ) __snake_case = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) __snake_case = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: __snake_case = nn.parallel.DistributedDataParallel( _UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCAmelCase ) elif args.n_gpu > 1: __snake_case = nn.DataParallel(_UpperCAmelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , _UpperCAmelCase ) # Prepare dataset __snake_case = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) __snake_case = (torch.from_numpy(_UpperCAmelCase ),) __snake_case = TensorDataset(*_UpperCAmelCase ) __snake_case = RandomSampler(_UpperCAmelCase ) __snake_case = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: __snake_case = mask_heads(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) prune_heads(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if __name__ == 
"__main__": main()
680
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
    __snake_case = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        __snake_case = dataset_size < in_memory_max_size
    else:
        __snake_case = False
    __snake_case = is_small_dataset(_UpperCAmelCase )
    assert result == expected
680
1
'''simple docstring'''
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any]=10_24 ) -> Union[str, Any]:
    __snake_case , __snake_case = [], []
    __snake_case = list(zip(_UpperCAmelCase , _UpperCAmelCase ) )
    __snake_case , __snake_case = sorted_examples[0]

    def is_too_big(_UpperCAmelCase : List[Any] ):
        return tok(_UpperCAmelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        __snake_case = new_src + " " + src
        __snake_case = new_tgt + " " + tgt
        if is_too_big(_UpperCAmelCase ) or is_too_big(_UpperCAmelCase ):  # cant fit, finalize example
            finished_src.append(_UpperCAmelCase )
            finished_tgt.append(_UpperCAmelCase )
            __snake_case , __snake_case = src, tgt
        else:  # can fit, keep adding
            __snake_case , __snake_case = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(_UpperCAmelCase )
        finished_tgt.append(_UpperCAmelCase )
    return finished_src, finished_tgt


def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Path , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> List[Any]:
    __snake_case = Path(_UpperCAmelCase )
    save_path.mkdir(exist_ok=_UpperCAmelCase )
    for split in ["train"]:
        __snake_case , __snake_case = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        __snake_case = [x.rstrip() for x in Path(_UpperCAmelCase ).open().readlines()]
        __snake_case = [x.rstrip() for x in Path(_UpperCAmelCase ).open().readlines()]
        __snake_case , __snake_case = pack_examples(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        print(F'''packed {split} split from {len(_UpperCAmelCase )} examples -> {len(_UpperCAmelCase )}.''' )
        Path(save_path / F'''{split}.source''' ).open("w" ).write("\n".join(_UpperCAmelCase ) )
        Path(save_path / F'''{split}.target''' ).open("w" ).write("\n".join(_UpperCAmelCase ) )
    for split in ["val", "test"]:
        __snake_case , __snake_case = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.source''' )
        shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.target''' )


def __UpperCAmelCase ( ) -> Tuple:
    __snake_case = argparse.ArgumentParser()
    parser.add_argument("--tok_name" , type=_UpperCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" , type=_UpperCAmelCase , default=1_28 )
    parser.add_argument("--data_dir" , type=_UpperCAmelCase )
    parser.add_argument("--save_path" , type=_UpperCAmelCase )
    __snake_case = parser.parse_args()
    __snake_case = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(_UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )


if __name__ == "__main__":
    packer_cli()
680
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
    if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        raise ValueError("Length must be a positive." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
    if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        raise ValueError("Length must be a positive." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
1
'''simple docstring'''
import math


def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 ) -> list:
    __snake_case = end or len(_UpperCAmelCase )
    for i in range(_UpperCAmelCase , _UpperCAmelCase ):
        __snake_case = i
        __snake_case = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            __snake_case = array[temp_index - 1]
            temp_index -= 1
        __snake_case = temp_index_value
    return array


def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> None:  # Max Heap
    __snake_case = index
    __snake_case = 2 * index + 1  # Left Node
    __snake_case = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        __snake_case = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        __snake_case = right_index

    if largest != index:
        __snake_case , __snake_case = array[largest], array[index]
        heapify(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )


def __UpperCAmelCase ( _UpperCAmelCase : list ) -> list:
    __snake_case = len(_UpperCAmelCase )

    for i in range(n // 2 , -1 , -1 ):
        heapify(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    for i in range(n - 1 , 0 , -1 ):
        __snake_case , __snake_case = array[0], array[i]
        heapify(_UpperCAmelCase , 0 , _UpperCAmelCase )
    return array


def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
    __snake_case = low
    __snake_case = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        __snake_case , __snake_case = array[j], array[i]
        i += 1


def __UpperCAmelCase ( _UpperCAmelCase : list ) -> list:
    if len(_UpperCAmelCase ) == 0:
        return array
    __snake_case = 2 * math.ceil(math.loga(len(_UpperCAmelCase ) ) )
    __snake_case = 16
    return intro_sort(_UpperCAmelCase , 0 , len(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase )


def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(_UpperCAmelCase )
        max_depth -= 1
        __snake_case = median_of_a(_UpperCAmelCase , _UpperCAmelCase , start + ((end - start) // 2) + 1 , end - 1 )
        __snake_case = partition(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        intro_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        __snake_case = p
    return insertion_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    a : Union[str, Any] = input('''Enter numbers separated by a comma : ''').strip()
    a : Dict = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
680
'''simple docstring'''
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137


def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
    __snake_case = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
    __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    __snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    __snake_case = (b_lata + b_lata) / 2
    __snake_case = (b_lata - b_lata) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    __snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
    __snake_case = cos(sigma / 2 ) ** 2
    __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    __snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
    __snake_case = sin(sigma / 2 ) ** 2
    __snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
1
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 a : int = sys.version_info >= (3, 10) def __UpperCAmelCase ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Union[str, Any]=None ) -> List[str]: return field(default_factory=lambda: default , metadata=_UpperCAmelCase ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = field(default="""toto""" , metadata={"""help""": """help message"""} ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """titi""" __SCREAMING_SNAKE_CASE = """toto""" class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """titi""" __SCREAMING_SNAKE_CASE = """toto""" __SCREAMING_SNAKE_CASE = 42 @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = "toto" def A ( self : Dict ): """simple docstring""" __snake_case = BasicEnum(self.foo ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = "toto" def A ( self : List[Any] ): """simple docstring""" __snake_case = MixedTypeEnum(self.foo ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = list_field(default=[] ) __SCREAMING_SNAKE_CASE = list_field(default=[] ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = list_field(default=[] ) __SCREAMING_SNAKE_CASE = list_field(default=[1, 2, 3] ) __SCREAMING_SNAKE_CASE = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) __SCREAMING_SNAKE_CASE = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = field() __SCREAMING_SNAKE_CASE = field() __SCREAMING_SNAKE_CASE = field() def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = BasicEnum(self.required_enum ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = field() __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = field(default="""toto""" , metadata={"""help""": """help message"""} ) __SCREAMING_SNAKE_CASE = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) if is_python_no_less_than_3_10: @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = list_field(default=[] ) __SCREAMING_SNAKE_CASE = list_field(default=[] ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : List[Any] , a_ : argparse.ArgumentParser , a_ : 
argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): __snake_case = {k: v for k, v in vars(a_ ).items() if k != "container"} __snake_case = {k: v for k, v in vars(a_ ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , a_ ) and yy.get("choices" , a_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](a_ ) , yy["type"](a_ ) ) del xx["type"], yy["type"] self.assertEqual(a_ , a_ ) def A ( self : Any ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument("--foo" , type=a_ , required=a_ ) expected.add_argument("--bar" , type=a_ , required=a_ ) expected.add_argument("--baz" , type=a_ , required=a_ ) expected.add_argument("--flag" , type=a_ , default=a_ , const=a_ , nargs="?" ) self.argparsersEqual(a_ , a_ ) __snake_case = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((__snake_case) , ) = parser.parse_args_into_dataclasses(a_ , look_for_args_file=a_ ) self.assertFalse(example.flag ) def A ( self : Tuple ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=a_ ) expected.add_argument("--baz" , default="toto" , type=a_ , help="help message" ) self.argparsersEqual(a_ , a_ ) def A ( self : Any ): """simple docstring""" __snake_case = argparse.ArgumentParser() expected.add_argument("--foo" , type=a_ , default=a_ , const=a_ , nargs="?" ) expected.add_argument("--baz" , type=a_ , default=a_ , const=a_ , nargs="?" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=a_ , dest="baz" ) expected.add_argument("--opt" , type=a_ , default=a_ ) __snake_case = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(a_ ) for dataclass_type in dataclass_types: __snake_case = HfArgumentParser(a_ ) self.argparsersEqual(a_ , a_ ) __snake_case = parser.parse_args([] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) __snake_case = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) __snake_case = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) __snake_case = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) __snake_case = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) ) def A ( self : str ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(a_ , a_ ) __snake_case = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) __snake_case = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) __snake_case = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) __snake_case = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) __snake_case = 
parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) __snake_case = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def A ( self : Any ): """simple docstring""" @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = "toto" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(a_ , a_ ) __snake_case = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) __snake_case = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) __snake_case = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def A ( self : List[Any] ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=a_ ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=a_ ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a_ ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=a_ ) self.argparsersEqual(a_ , a_ ) __snake_case = parser.parse_args([] ) self.assertEqual( a_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) __snake_case = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(a_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def A ( self : Dict ): """simple docstring""" __snake_case = argparse.ArgumentParser() expected.add_argument("--foo" , default=a_ , type=a_ ) expected.add_argument("--bar" , default=a_ , type=a_ , help="help message" ) expected.add_argument("--baz" , default=a_ , type=a_ ) expected.add_argument("--ces" , nargs="+" , default=[] , type=a_ ) expected.add_argument("--des" , nargs="+" , default=[] , type=a_ ) __snake_case = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(a_ ) for dataclass_type in dataclass_types: __snake_case = HfArgumentParser(a_ ) self.argparsersEqual(a_ , a_ ) __snake_case = parser.parse_args([] ) self.assertEqual(a_ , Namespace(foo=a_ , bar=a_ , baz=a_ , ces=[] , des=[] ) ) __snake_case = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(a_ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def A ( self : int ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument("--required_list" , nargs="+" , type=a_ , required=a_ ) expected.add_argument("--required_str" , type=a_ , required=a_ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a_ , ) self.argparsersEqual(a_ , a_ ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = argparse.ArgumentParser() expected.add_argument("--foo" , type=a_ , required=a_ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a_ , ) expected.add_argument("--opt" , type=a_ , default=a_ ) expected.add_argument("--baz" , default="toto" , type=a_ , help="help message" ) 
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a_ ) self.argparsersEqual(a_ , a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } __snake_case = parser.parse_dict(a_ )[0] __snake_case = BasicExample(**a_ ) self.assertEqual(a_ , a_ ) def A ( self : Dict ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(a_ , parser.parse_dict , a_ , allow_extra_keys=a_ ) def A ( self : Dict ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = os.path.join(a_ , "temp_json" ) os.mkdir(a_ ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(a_ , a_ ) __snake_case = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] __snake_case = BasicExample(**a_ ) self.assertEqual(a_ , a_ ) def A ( self : int ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) __snake_case = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = os.path.join(a_ , "temp_yaml" ) os.mkdir(a_ ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(a_ , a_ ) __snake_case = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] __snake_case = BasicExample(**a_ ) self.assertEqual(a_ , a_ ) def A ( self : str ): """simple docstring""" __snake_case = HfArgumentParser(a_ ) self.assertIsNotNone(a_ )
680
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray: # For applying gaussian function for each element in matrix. __snake_case = math.sqrt(_UpperCAmelCase ) __snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray: __snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase ): __snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray: __snake_case = np.zeros(img.shape ) __snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase ) __snake_case , __snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] __snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase ) __snake_case = val return imga def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple: __snake_case = args[1] if args[1:] else "../image_data/lena.jpg" __snake_case = float(args[2] ) if args[2:] else 1.0 __snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: __snake_case = int(args[4] ) __snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: __snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a , a , a , a : Tuple = parse_args(sys.argv) a : Tuple = cva.imread(filename, 0) cva.imshow('''input image''', img) a : Dict = img / 255 a : str = out.astype('''float32''') a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a : Dict = out * 255 a : List[str] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
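# The heart of the bilateral filter above is a per-pixel weighted average whose weights
# multiply a fixed spatial Gaussian by an intensity Gaussian measured against the centre
# of the window. That single step in isolation (all names illustrative):
import numpy as np


def gaussian(x: np.ndarray, sigma: float) -> np.ndarray:
    return np.exp(-((x / sigma) ** 2) * 0.5) / (sigma * np.sqrt(2 * np.pi))


def bilateral_pixel(window: np.ndarray, spatial_kernel: np.ndarray, intensity_sigma: float) -> float:
    # window: (k, k) patch centred on the pixel; spatial_kernel: precomputed (k, k) weights.
    centre = window[window.shape[0] // 2, window.shape[1] // 2]
    intensity_kernel = gaussian(window - centre, intensity_sigma)
    weights = spatial_kernel * intensity_kernel
    return float(np.sum(weights * window) / np.sum(weights))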
680
1
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A ( self : Optional[int] ): """simple docstring""" __snake_case = 1 __snake_case = 3 __snake_case = (32, 32) __snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a_ ) return image @property def A ( self : int ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def A ( self : int ): """simple docstring""" torch.manual_seed(0 ) __snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def A ( self : Optional[int] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(a_ ) @property def A ( self : str ): """simple docstring""" def extract(*a_ : List[str] , **a_ : Optional[int] ): class SCREAMING_SNAKE_CASE__ : def __init__( self : Dict ): """simple docstring""" __snake_case = torch.ones([0] ) def A ( self : Tuple , a_ : Dict ): """simple docstring""" self.pixel_values.to(a_ ) return self return Out() return extract def A ( self : str ): """simple docstring""" __snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case = self.dummy_cond_unet __snake_case = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk __snake_case = StableDiffusionPipeline( unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger" __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) __snake_case = output.images __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = 
image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case = self.dummy_cond_unet __snake_case = PNDMScheduler(skip_prk_steps=a_ ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk __snake_case = StableDiffusionPipeline( unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger" __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) __snake_case = output.images __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A ( self : int ): """simple docstring""" __snake_case = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a_ ) assert isinstance(a_ , a_ ) assert isinstance(pipe.scheduler , a_ ) assert pipe.safety_checker is None __snake_case = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) __snake_case = StableDiffusionPipeline.from_pretrained(a_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None __snake_case = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def A ( self : str ): """simple docstring""" __snake_case = self.dummy_cond_unet __snake_case = PNDMScheduler(skip_prk_steps=a_ ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 __snake_case = unet.half() __snake_case = vae.half() __snake_case = bert.half() # make sure here that pndm scheduler skips prk __snake_case = StableDiffusionPipeline( unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger" __snake_case = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class 
SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Any ): """simple docstring""" __snake_case = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ ) __snake_case = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) __snake_case = 4_003_660_346 __snake_case = 7 # without safety guidance (sld_guidance_scale = 0) __snake_case = torch.manual_seed(a_ ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) __snake_case = output.images __snake_case = image[0, -3:, -3:, -1] __snake_case = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) __snake_case = torch.manual_seed(a_ ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __snake_case = output.images __snake_case = image[0, -3:, -3:, -1] __snake_case = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A ( self : Any ): """simple docstring""" __snake_case = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ ) __snake_case = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = "padme amidala taking a bath artwork, safe for work, no nudity" __snake_case = 2_734_971_755 __snake_case = 7 __snake_case = torch.manual_seed(a_ ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) __snake_case = output.images __snake_case = image[0, -3:, -3:, -1] __snake_case = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 __snake_case = torch.manual_seed(a_ ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __snake_case = output.images __snake_case = image[0, -3:, -3:, -1] __snake_case = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A ( self : Dict ): """simple docstring""" __snake_case = 
StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) __snake_case = 1_044_355_234 __snake_case = 12 __snake_case = torch.manual_seed(a_ ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) __snake_case = output.images __snake_case = image[0, -3:, -3:, -1] __snake_case = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 __snake_case = torch.manual_seed(a_ ) __snake_case = sd_pipe( [prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __snake_case = output.images __snake_case = image[0, -3:, -3:, -1] __snake_case = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
680
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ): """simple docstring""" __snake_case = name __snake_case = value __snake_case = weight def __repr__( self : Optional[int] ): """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def A ( self : Any ): """simple docstring""" return self.value def A ( self : str ): """simple docstring""" return self.name def A ( self : int ): """simple docstring""" return self.weight def A ( self : Tuple ): """simple docstring""" return self.value / self.weight def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]: __snake_case = [] for i in range(len(_UpperCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int: __snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase ) __snake_case = [] __snake_case , __snake_case = 0.0, 0.0 for i in range(len(_UpperCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __UpperCAmelCase ( ) -> Optional[Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
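# A de-obfuscated sketch of the greedy selection implemented above: sort by a caller-chosen
# key (typically value density) and take each item that still fits the budget. The Thing
# dataclass and function names are illustrative.
from dataclasses import dataclass


@dataclass
class Thing:
    name: str
    value: float
    weight: float


def greedy(items: list, max_cost: float, key) -> tuple:
    chosen, total_cost, total_value = [], 0.0, 0.0
    for item in sorted(items, key=key, reverse=True):
        if total_cost + item.weight <= max_cost:
            chosen.append(item)
            total_cost += item.weight
            total_value += item.value
    return chosen, total_value


# Example: greedy(menu, 15.0, key=lambda t: t.value / t.weight) maximises value density first.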
680
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """philschmid/bart-large-cnn-samsum""" __SCREAMING_SNAKE_CASE = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) __SCREAMING_SNAKE_CASE = """summarizer""" __SCREAMING_SNAKE_CASE = AutoTokenizer __SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM __SCREAMING_SNAKE_CASE = ["""text"""] __SCREAMING_SNAKE_CASE = ["""text"""] def A ( self : List[Any] , a_ : str ): """simple docstring""" return self.pre_processor(a_ , return_tensors="pt" , truncation=a_ ) def A ( self : Dict , a_ : Optional[int] ): """simple docstring""" return self.model.generate(**a_ )[0] def A ( self : Optional[int] , a_ : Dict ): """simple docstring""" return self.pre_processor.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ )
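# The PipelineTool above wraps a seq2seq checkpoint; roughly the same behaviour is
# available through the high-level pipeline API. The checkpoint name comes from the
# tool's default checkpoint; the input text is illustrative.
from transformers import pipeline

summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
result = summarizer("Jeff: Can I train a model on Amazon SageMaker? Anna: Sure, use the Hugging Face DLCs.")
print(result[0]["summary_text"])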
680
'''simple docstring''' import os from math import logaa def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int: __snake_case = 0 __snake_case = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ): __snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) ) if x * logaa(_UpperCAmelCase ) > largest: __snake_case = x * logaa(_UpperCAmelCase ) __snake_case = i + 1 return result if __name__ == "__main__": print(solution())
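# The solution above never computes base ** exp directly: since log is monotonic, comparing
# exp * log(base) preserves the ordering while staying in float range. The trick in
# isolation (function name and sample pairs are illustrative):
from math import log2


def largest_power_line(pairs: list) -> int:
    # pairs: (base, exponent) tuples; returns the 1-based index of the largest base ** exp.
    best = max(range(len(pairs)), key=lambda i: pairs[i][1] * log2(pairs[i][0]))
    return best + 1


assert largest_power_line([(2, 11), (3, 7), (632382, 518061)]) == 3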
680
1
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Tuple ): """simple docstring""" __snake_case = tempfile.mkdtemp() __snake_case = 5 # Realm tok __snake_case = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __snake_case = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(a_ , exist_ok=a_ ) __snake_case = os.path.join(a_ , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) __snake_case = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(a_ , exist_ok=a_ ) def A ( self : int ): """simple docstring""" return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def A ( self : List[Any] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def A ( self : List[Any] ): """simple docstring""" __snake_case = RealmConfig(num_block_records=self.num_block_records ) return config def A ( self : Any ): """simple docstring""" __snake_case = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def A ( self : int ): """simple docstring""" __snake_case = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=a_ , ) return block_records def A ( self : List[str] ): """simple docstring""" __snake_case = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def A ( self : Optional[Any] ): """simple docstring""" __snake_case = self.get_config() __snake_case = self.get_dummy_retriever() __snake_case = retriever.tokenizer __snake_case = np.array([0, 3] , dtype="long" ) __snake_case = tokenizer(["Test question"] ).input_ids __snake_case = tokenizer( ["the fourth"] , add_special_tokens=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , ).input_ids __snake_case = config.reader_seq_len __snake_case , __snake_case , __snake_case , __snake_case = retriever( a_ , a_ , answer_ids=a_ , max_length=a_ , return_tensors="np" ) self.assertEqual(len(a_ ) , 2 ) self.assertEqual(len(a_ ) , 2 ) self.assertEqual(len(a_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def A ( self : Optional[int] ): 
"""simple docstring""" __snake_case = self.get_config() __snake_case = self.get_dummy_retriever() __snake_case = retriever.tokenizer __snake_case = np.array([0, 3, 5] , dtype="long" ) __snake_case = tokenizer(["Test question"] ).input_ids __snake_case = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , ).input_ids __snake_case = config.reader_seq_len __snake_case , __snake_case , __snake_case , __snake_case = retriever( a_ , a_ , answer_ids=a_ , max_length=a_ , return_tensors="np" ) self.assertEqual([False, True, True] , a_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , a_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path __snake_case = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: __snake_case = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) __snake_case = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
680
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_logger(__name__) a : Dict = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a : Any = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } a : Optional[int] = { '''facebook/blenderbot_small-90M''': 512, } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ): """simple docstring""" super().__init__( ByteLevelBPETokenizer( vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , ) __snake_case = add_prefix_space def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ): """simple docstring""" __snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
680
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : Union[str, Any] = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
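# These __init__ modules defer heavy imports through transformers' _LazyModule, which only
# imports a submodule when one of its attributes is first accessed. A simplified standalone
# sketch of that pattern (the real class also handles module specs, relative submodules,
# and raises AttributeError for unknown names):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


# Example: LazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1}) imports json on first use.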
680
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : str = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
1
'''simple docstring''' import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray ) -> np.ndarray: return 1 / (1 + np.exp(-vector )) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray ) -> np.ndarray: return vector * sigmoid(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
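# The second function above is the SiLU activation ("sigmoid-weighted linear unit", often
# called swish): f(x) = x * sigmoid(x). A compact equivalent with a spot check:
import numpy as np


def silu(x: np.ndarray) -> np.ndarray:
    return x / (1.0 + np.exp(-x))  # algebraically identical to x * sigmoid(x)


print(silu(np.array([-1.0, 0.0, 1.0])))  # approx. [-0.26894, 0.0, 0.73106]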
680
'''simple docstring''' # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and a second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is (as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers a : Optional[Any] = float('''nan''') class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Optional[int] ): """simple docstring""" __snake_case = sys.stdout __snake_case = open(a_ , "a" ) def __getattr__( self : str , a_ : List[Any] ): """simple docstring""" return getattr(self.stdout , a_ ) def A ( self : Union[str, Any] , a_ : List[Any] ): """simple docstring""" self.stdout.write(a_ ) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) ) def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]: __snake_case = [] # deal with critical env vars __snake_case = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: __snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) __snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(_UpperCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __snake_case = [] __snake_case = "" while len(_UpperCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCAmelCase ) __snake_case = "" return "\\\n".join(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple: # unwrap multi-line input __snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own __snake_case = re.sub(R"--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir __snake_case = re.sub(R"--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str: # Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , ) __snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams __snake_case = variation.replace(" " , "-" ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f: f.write(result.stdout ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f: __snake_case = json.load(_UpperCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict: __snake_case = [] __snake_case = [] __snake_case = F'''{id}: {variation:<{longest_variation_len}}''' __snake_case = F'''{preamble}: ''' __snake_case = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ): __snake_case = process_run_single( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = single_run_metrics[target_metric_key] if not math.isnan(_UpperCAmelCase ): metrics.append(_UpperCAmelCase ) results.append(_UpperCAmelCase ) outcome += "✓" else: outcome += "✘" __snake_case = F'''\33[2K\r{outcome}''' if len(_UpperCAmelCase ) > 0: __snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __snake_case = round(mean_metrics[target_metric_key] , 2 ) __snake_case = F'''{outcome} {mean_target}''' if len(_UpperCAmelCase ) > 1: results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}''' print(_UpperCAmelCase ) __snake_case = variation return mean_metrics else: print(_UpperCAmelCase ) return {variation_key: variation, target_metric_key: nan} def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]: __snake_case = pd.DataFrame(_UpperCAmelCase ) __snake_case = "variation" __snake_case = "diff_%" __snake_case = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCAmelCase 
): # as a fallback, use the minimal value as the sentinel __snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCAmelCase ): __snake_case = df.apply( lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns __snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys] __snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols # capitalize __snake_case = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" ) __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" ) __snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] print("\n\n".join(_UpperCAmelCase ) ) def __UpperCAmelCase ( ) -> Dict: __snake_case = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , ) parser.add_argument( "--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) __snake_case = parser.parse_args() __snake_case = args.output_dir Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) __snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase ) # split each dimension into its --foo variations __snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) ) __snake_case = max(len(_UpperCAmelCase ) for x in variations ) # split wanted keys __snake_case = args.report_metric_keys.split() # capture prints into a log file for convenience __snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) __snake_case = Tee(_UpperCAmelCase ) print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' ) print(F'''Base command: {" ".join(_UpperCAmelCase )}''' ) __snake_case = "variation" __snake_case = [] for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ): __snake_case = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) ) process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase ) if __name__ == "__main__": main()
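# The script's key preprocessing step is expanding --variations into a cartesian product of
# command-line fragments, exactly as the header comment describes. That step in isolation
# (dimension values copied from the example in the header):
import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
variations = [" ".join(parts).strip() for parts in itertools.product(*dims)]
print(variations)
# -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']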
680
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE__ ( metaclass=_UpperCamelCase ): __SCREAMING_SNAKE_CASE = ["""flax""", """transformers"""] def __init__( self : int , *a_ : List[str] , **a_ : Optional[int] ): """simple docstring""" requires_backends(self , ["flax", "transformers"] ) @classmethod def A ( cls : Union[str, Any] , *a_ : Tuple , **a_ : Union[str, Any] ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) @classmethod def A ( cls : Union[str, Any] , *a_ : List[Any] , **a_ : Dict ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) class SCREAMING_SNAKE_CASE__ ( metaclass=_UpperCamelCase ): __SCREAMING_SNAKE_CASE = ["""flax""", """transformers"""] def __init__( self : Any , *a_ : str , **a_ : Dict ): """simple docstring""" requires_backends(self , ["flax", "transformers"] ) @classmethod def A ( cls : Dict , *a_ : List[Any] , **a_ : Tuple ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) @classmethod def A ( cls : List[str] , *a_ : Optional[int] , **a_ : Union[str, Any] ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) class SCREAMING_SNAKE_CASE__ ( metaclass=_UpperCamelCase ): __SCREAMING_SNAKE_CASE = ["""flax""", """transformers"""] def __init__( self : str , *a_ : Union[str, Any] , **a_ : int ): """simple docstring""" requires_backends(self , ["flax", "transformers"] ) @classmethod def A ( cls : Union[str, Any] , *a_ : str , **a_ : Optional[Any] ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) @classmethod def A ( cls : Optional[int] , *a_ : str , **a_ : Tuple ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) class SCREAMING_SNAKE_CASE__ ( metaclass=_UpperCamelCase ): __SCREAMING_SNAKE_CASE = ["""flax""", """transformers"""] def __init__( self : List[Any] , *a_ : Union[str, Any] , **a_ : List[str] ): """simple docstring""" requires_backends(self , ["flax", "transformers"] ) @classmethod def A ( cls : Tuple , *a_ : Union[str, Any] , **a_ : int ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] ) @classmethod def A ( cls : Optional[int] , *a_ : Optional[Any] , **a_ : str ): """simple docstring""" requires_backends(cls , ["flax", "transformers"] )
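# Every dummy class above routes construction and its classmethods through
# requires_backends, so importing the package without flax/transformers fails lazily with
# a clear message. A simplified standalone sketch of that check (error text illustrative):
import importlib.util


def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        if isinstance(obj, str):
            name = obj
        elif isinstance(obj, type):
            name = obj.__name__
        else:
            name = type(obj).__name__
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")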
680
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( ) -> Dict: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case = [1, 2, 3] with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 ) with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]: __snake_case = [1, 2] __snake_case = {"a": 1, "b": 2} __snake_case = {"a": [1, 2], "b": [3, 4]} __snake_case = {"a": {"1": 1}, "b": 2} __snake_case = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case = [2, 3] __snake_case = {"a": 2, "b": 3} __snake_case = {"a": [2, 3], "b": [4, 5]} __snake_case = {"a": {"1": 2}, "b": 3} __snake_case = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
680
1
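The dummy-object sample above guards optional dependencies; a simplified sketch of the requires_backends idea, assuming the real helper probes importability (the names here are illustrative, not the library's exact internals):

import importlib.util

def requires_backends(obj, backends):
    # raise a helpful error listing every backend that cannot be imported
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires the missing backends: {missing}")

class FlaxDummyModel:
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)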
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a : Optional[int] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Dict = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : int = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """mobilenet_v2""" def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ): """simple docstring""" super().__init__(**a_ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) __snake_case = num_channels __snake_case = image_size __snake_case = depth_multiplier __snake_case = depth_divisible_by __snake_case = min_depth __snake_case = expand_ratio __snake_case = output_stride __snake_case = first_layer_is_expansion __snake_case = finegrained_output __snake_case = hidden_act __snake_case = tf_padding __snake_case = classifier_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = version.parse("""1.11""" ) @property def A ( self : Optional[int] ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def A ( self : Optional[int] ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def A ( self : int ): """simple docstring""" return 1e-4
680
1
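The NLLB sample above registers its tokenizers through a lazy module so heavy imports only happen on first attribute access; a stripped-down sketch of that mechanism (simplified from _LazyModule, standard library only):

import importlib

class LazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        # import the owning submodule only when the symbol is first requested
        try:
            module_name = self._symbol_to_module[attr]
        except KeyError:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self._name}.{module_name}")
        return getattr(module, attr)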
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """Wav2Vec2FeatureExtractor""" __SCREAMING_SNAKE_CASE = """AutoTokenizer""" def __init__( self : List[Any] , a_ : List[Any] , a_ : Tuple ): """simple docstring""" super().__init__(a_ , a_ ) __snake_case = self.feature_extractor __snake_case = False @classmethod def A ( cls : Any , a_ : int , **a_ : List[str] ): """simple docstring""" try: return super().from_pretrained(a_ , **a_ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , a_ , ) __snake_case = WavaVecaFeatureExtractor.from_pretrained(a_ , **a_ ) __snake_case = WavaVecaCTCTokenizer.from_pretrained(a_ , **a_ ) return cls(feature_extractor=a_ , tokenizer=a_ ) def __call__( self : int , *a_ : Optional[Any] , **a_ : int ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*a_ , **a_ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) __snake_case = kwargs.pop("raw_speech" ) else: __snake_case = kwargs.pop("audio" , a_ ) __snake_case = kwargs.pop("sampling_rate" , a_ ) __snake_case = kwargs.pop("text" , a_ ) if len(a_ ) > 0: __snake_case = args[0] __snake_case = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: __snake_case = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ ) if text is not None: __snake_case = self.tokenizer(a_ , **a_ ) if text is None: return inputs elif audio is None: return encodings else: __snake_case = encodings["input_ids"] return inputs def A ( self : int , *a_ : Tuple , **a_ : Dict ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*a_ , **a_ ) __snake_case = kwargs.pop("input_features" , a_ ) __snake_case = kwargs.pop("labels" , a_ ) if len(a_ ) > 0: __snake_case = args[0] __snake_case = args[1:] if input_features is not None: __snake_case = self.feature_extractor.pad(a_ , *a_ , **a_ ) if labels is not None: __snake_case = self.tokenizer.pad(a_ , **a_ ) if labels is None: return input_features elif input_features is None: return labels else: __snake_case = labels["input_ids"] return input_features def A ( self : Optional[Any] , *a_ : Optional[int] , **a_ : str ): """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def A ( self : str , *a_ : List[Any] , **a_ : Optional[Any] ): """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @contextmanager def A ( self : Dict ): """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) __snake_case = True __snake_case = self.tokenizer yield __snake_case = self.feature_extractor __snake_case = False
680
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """data2vec-text""" def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @property def A ( self : Any ): """simple docstring""" if self.task == "multiple-choice": __snake_case = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
680
1
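Hypothetical usage of the processor pattern above: audio routes to the feature extractor and text to the tokenizer inside a single call (the model name and the silent audio array are illustrative assumptions):

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="HELLO WORLD", return_tensors="pt")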
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : int = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """mobilenet_v2""" def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ): """simple docstring""" super().__init__(**a_ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) __snake_case = num_channels __snake_case = image_size __snake_case = depth_multiplier __snake_case = depth_divisible_by __snake_case = min_depth __snake_case = expand_ratio __snake_case = output_stride __snake_case = first_layer_is_expansion __snake_case = finegrained_output __snake_case = hidden_act __snake_case = tf_padding __snake_case = classifier_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = version.parse("""1.11""" ) @property def A ( self : Optional[int] ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def A ( self : Optional[int] ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def A ( self : int ): """simple docstring""" return 1e-4
680
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a : Tuple = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ): """simple docstring""" __snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] ) __snake_case = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , a_ : int ): """simple docstring""" super().__init__(a_ ) __snake_case = BertEncoderWithPabee(a_ ) self.init_weights() __snake_case = 0 __snake_case = 0 __snake_case = 0 __snake_case = 0 def A ( self : Optional[int] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = threshold def A ( self : Optional[Any] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = patience def A ( self : Any ): """simple docstring""" __snake_case = 0 __snake_case = 0 def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.inference_layers_num / self.inference_instances_num __snake_case = ( f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(a_ ) @add_start_docstrings_to_model_forward(a_ ) def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __snake_case = input_ids.size() elif inputs_embeds is not None: __snake_case = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) if token_type_ids is None: __snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __snake_case , __snake_case , __snake_case = encoder_hidden_states.size() __snake_case = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) __snake_case = self.invert_attention_mask(a_ ) else: __snake_case = None # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers ) __snake_case = self.embeddings( input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ ) __snake_case = embedding_output if self.training: __snake_case = [] for i in range(self.config.num_hidden_layers ): __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](output_dropout(a_ ) ) res.append(a_ ) elif self.patience == 0: # Use all layers for inference __snake_case = self.encoder( a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case = self.pooler(encoder_outputs[0] ) __snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )] else: __snake_case = 0 __snake_case = None __snake_case = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](a_ ) if regression: __snake_case = logits.detach() if patient_result is not None: __snake_case = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __snake_case = 0 else: __snake_case = logits.detach().argmax(dim=1 ) if patient_result is not None: __snake_case = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(a_ ) ): patient_counter += 1 else: __snake_case = 0 __snake_case = logits if patient_counter == self.patience: break __snake_case = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : List[str] , a_ : Tuple ): """simple docstring""" super().__init__(a_ ) __snake_case = config.num_labels __snake_case = BertModelWithPabee(a_ ) __snake_case = nn.Dropout(config.hidden_dropout_prob ) __snake_case = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(a_ ) def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ): """simple docstring""" __snake_case = self.bert( input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __snake_case = (logits[-1],) if labels is not None: __snake_case = None __snake_case = 0 for ix, logits_item in enumerate(a_ ): if self.num_labels == 1: # We are doing regression __snake_case = MSELoss() __snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __snake_case = CrossEntropyLoss() __snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __snake_case = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __snake_case = (total_loss / total_weights,) + outputs return outputs
680
1
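The PABEE sample above exits early once enough consecutive per-layer classifiers agree; a minimal sketch of that patience loop (`layers` and `classifiers` stand in for the real modules):

import torch

def pabee_infer(hidden, layers, classifiers, patience):
    patient_counter, prev_pred, logits = 0, None, None
    for layer, classifier in zip(layers, classifiers):
        hidden = layer(hidden)
        logits = classifier(hidden)
        pred = logits.argmax(dim=1)
        # count how many consecutive layers predict the same labels
        if prev_pred is not None and torch.all(pred.eq(prev_pred)):
            patient_counter += 1
        else:
            patient_counter = 0
        prev_pred = pred
        if patient_counter == patience:
            break  # confident enough: skip the remaining layers
    return logits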
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : int , a_ : Dict , a_ : Optional[int]=3 , a_ : Dict=7 , a_ : Optional[int]=True , a_ : Optional[Any]=True , a_ : Optional[int]=False , a_ : List[str]=True , a_ : List[str]=99 , a_ : Optional[int]=32 , a_ : Dict=5 , a_ : Any=4 , a_ : int=37 , a_ : Dict="gelu" , a_ : Any=0.1 , a_ : Union[str, Any]=0.1 , a_ : Any=512 , a_ : Union[str, Any]=16 , a_ : Union[str, Any]=2 , a_ : List[str]=0.02 , a_ : Dict=3 , a_ : Tuple=4 , a_ : int=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case = None __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = ids_tensor([self.batch_size] , self.num_choices ) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[int] ): """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a_ , ) def A ( self : Optional[int] , a_ : Any , a_ : Union[str, Any] , a_ : List[str] , a_ : int , a_ : Optional[Any] , a_ : Optional[int] , a_ : Optional[int] ): """simple docstring""" __snake_case = FalconModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , attention_mask=a_ ) __snake_case = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def A ( self : Any , a_ : List[Any] , a_ : List[Any] , a_ : List[str] , a_ : Tuple , a_ : Union[str, Any] , a_ : Tuple , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple , ): """simple docstring""" __snake_case = True __snake_case = FalconModel(a_ ) model.to(a_ ) model.eval() __snake_case = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , ) __snake_case = model(a_ , attention_mask=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Optional[Any] , a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict , a_ : Tuple , a_ : Optional[int] , a_ : Optional[Any] , a_ : str , a_ : str , ): """simple docstring""" __snake_case = FalconForCausalLM(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Any , a_ : Union[str, Any] , a_ : List[Any] , a_ : Optional[int] , a_ : List[str] , a_ : Union[str, Any] , a_ : Any , a_ : List[Any] , a_ : Union[str, Any] , a_ : Any , ): """simple docstring""" __snake_case = True __snake_case = True __snake_case = FalconForCausalLM(config=a_ ) model.to(a_ ) model.eval() # first forward pass __snake_case = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , ) __snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case = torch.cat([input_mask, next_mask] , dim=-1 ) __snake_case = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )["hidden_states"][0] __snake_case = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )["hidden_states"][0] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) ) def A ( self : str ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = (FalconForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": 
FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : List[Any] ): """simple docstring""" __snake_case = FalconModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 ) def A ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case , *__snake_case = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __snake_case = alibi self.model_tester.create_and_check_model(a_ , *a_ ) def A ( self : List[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = input_dict["input_ids"] __snake_case = input_ids.ne(1 ).to(a_ ) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case = FalconForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : str ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = "single_label_classification" __snake_case = input_dict["input_ids"] __snake_case = input_ids.ne(1 ).to(a_ ) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case = FalconForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : str ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = input_dict["input_ids"] __snake_case = FalconForCausalLM(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , use_cache=a_ ) __snake_case = input_ids.shape[0] __snake_case = model._convert_to_rw_cache(result.past_key_values ) __snake_case = model._convert_cache_to_standard_format(a_ , a_ ) for layer in range(len(a_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = "multi_label_classification" __snake_case = input_dict["input_ids"] __snake_case = input_ids.ne(1 ).to(a_ ) __snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __snake_case = FalconForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : List[str] ): 
"""simple docstring""" for model_class in self.all_generative_model_classes: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(a_ , "use_cache" ): return __snake_case = model_class(a_ ).to(a_ ) if "use_cache" not in inputs: __snake_case = True __snake_case = model(**a_ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __snake_case = ( getattr(a_ , "decoder_layers" , a_ ) or getattr(a_ , "num_decoder_layers" , a_ ) or config.num_hidden_layers ) __snake_case = getattr(a_ , "num_kv_heads" , config.num_attention_heads ) __snake_case = getattr(a_ , "d_model" , config.hidden_size ) __snake_case = embed_dim // num_attention_heads __snake_case = outputs["past_key_values"] self.assertEqual(len(a_ ) , a_ ) __snake_case , __snake_case = inputs["input_ids"].shape for i in range(a_ ): if config.new_decoder_architecture: __snake_case = config.num_attention_heads elif config.multi_query: __snake_case = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def A ( self : Any ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" ) __snake_case = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" ) model.eval() model.to(a_ ) __snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ ) __snake_case = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." ) __snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=19 ) __snake_case = tokenizer.batch_decode(a_ )[0] self.assertEqual(a_ , a_ ) @slow def A ( self : Optional[int] ): """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __snake_case = AutoTokenizer.from_pretrained(a_ ) __snake_case = FalconForCausalLM.from_pretrained(a_ ) model.eval() model.to(a_ ) __snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**a_ , do_sample=a_ , max_new_tokens=4 ) model.generate(**a_ , do_sample=a_ , max_new_tokens=4 ) model.generate(**a_ , num_beams=2 , max_new_tokens=4 ) @slow def A ( self : List[str] ): """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __snake_case = AutoTokenizer.from_pretrained(a_ ) __snake_case = FalconForCausalLM.from_pretrained(a_ ) model.eval() model.to(device=a_ ) __snake_case = tokenizer("My favorite food is" , return_tensors="pt" ).to(a_ ) # Test results are the same with and without cache __snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ ) __snake_case = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
680
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = backbone_out_indices __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = initializer_range __snake_case = num_labels __snake_case = backbone_featmap_shape __snake_case = scope __snake_case = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __snake_case = (image_size // patch_size) ** 2 __snake_case = num_patches + 1 def A ( self : int ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def A ( self : Optional[Any] ): """simple docstring""" __snake_case = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ): """simple docstring""" __snake_case = DPTModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForDepthEstimation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def A ( self : List[Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( { """depth-estimation""": DPTForDepthEstimation, """feature-extraction""": DPTModel, """image-segmentation""": DPTForSemanticSegmentation, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : Optional[Any] ): """simple docstring""" __snake_case = DPTModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def A ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds" ) def A ( self : Any ): """simple docstring""" pass def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def A ( self : List[str] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) def A ( self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def A ( self : Optional[int] ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True if model_class in get_values(a_ ): continue __snake_case = model_class(a_ ) model.to(a_ ) model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : int ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = False __snake_case = True if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue __snake_case = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = _config_zero_init(a_ ) for model_class in self.all_model_classes: __snake_case = model_class(config=a_ ) # Skip the check for the backbone __snake_case = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : Tuple ): """simple docstring""" pass @slow def A ( self : int ): """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __snake_case = DPTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def A ( self : int ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = "add" with self.assertRaises(a_ ): __snake_case = DPTForDepthEstimation(a_ ) def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Dict ): """simple docstring""" __snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" ) __snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ ) __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.predicted_depth # verify the predicted depth __snake_case = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , a_ ) __snake_case = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
680
1
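The Falcon test above asserts per-layer key/value cache shapes; a self-contained sketch of the layout it checks (all dimensions below are illustrative):

import torch

batch, heads, seq_len, head_dim, layers = 2, 4, 8, 16, 3
past_key_values = tuple(
    (torch.zeros(batch, heads, seq_len, head_dim),   # key
     torch.zeros(batch, heads, seq_len, head_dim))   # value
    for _ in range(layers)
)
assert len(past_key_values) == layers
for key, value in past_key_values:
    assert key.shape == value.shape == (batch, heads, seq_len, head_dim)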
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = (DDPMParallelScheduler,) def A ( self : List[Any] , **a_ : Tuple ): """simple docstring""" __snake_case = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**a_ ) return config def A ( self : Tuple ): """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=a_ ) def A ( self : Optional[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=a_ , beta_end=a_ ) def A ( self : Optional[Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=a_ ) def A ( self : str ): """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=a_ ) def A ( self : List[Any] ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=a_ ) def A ( self : str ): """simple docstring""" self.check_over_configs(thresholding=a_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , ) def A ( self : Union[str, Any] ): """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=a_ ) def A ( self : Dict ): """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def A ( self : Tuple ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) __snake_case = len(a_ ) __snake_case = self.dummy_model() __snake_case = self.dummy_sample_deter __snake_case = self.dummy_sample_deter + 0.1 __snake_case = self.dummy_sample_deter - 0.1 __snake_case = samplea.shape[0] __snake_case = torch.stack([samplea, samplea, samplea] , dim=0 ) __snake_case = torch.arange(a_ )[0:3, None].repeat(1 , a_ ) __snake_case = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __snake_case = scheduler.batch_step_no_noise(a_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) __snake_case = torch.sum(torch.abs(a_ ) ) __snake_case = torch.mean(torch.abs(a_ ) ) assert abs(result_sum.item() - 1153.1833 ) < 1e-2 assert abs(result_mean.item() - 0.5005 ) < 1e-3 def A ( self : Dict ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) __snake_case = len(a_ ) __snake_case = self.dummy_model() __snake_case = self.dummy_sample_deter __snake_case = torch.manual_seed(0 ) for t in reversed(range(a_ ) ): # 1. predict noise residual __snake_case = model(a_ , a_ ) # 2. 
predict previous mean of sample x_t-1 __snake_case = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample __snake_case = pred_prev_sample __snake_case = torch.sum(torch.abs(a_ ) ) __snake_case = torch.mean(torch.abs(a_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def A ( self : str ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config(prediction_type="v_prediction" ) __snake_case = scheduler_class(**a_ ) __snake_case = len(a_ ) __snake_case = self.dummy_model() __snake_case = self.dummy_sample_deter __snake_case = torch.manual_seed(0 ) for t in reversed(range(a_ ) ): # 1. predict noise residual __snake_case = model(a_ , a_ ) # 2. predict previous mean of sample x_t-1 __snake_case = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample __snake_case = pred_prev_sample __snake_case = torch.sum(torch.abs(a_ ) ) __snake_case = torch.mean(torch.abs(a_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def A ( self : Tuple ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) __snake_case = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=a_ ) __snake_case = scheduler.timesteps for i, timestep in enumerate(a_ ): if i == len(a_ ) - 1: __snake_case = -1 else: __snake_case = timesteps[i + 1] __snake_case = scheduler.previous_timestep(a_ ) __snake_case = prev_t.item() self.assertEqual(a_ , a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) __snake_case = [100, 87, 50, 51, 0] with self.assertRaises(a_ , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) __snake_case = [100, 87, 50, 1, 0] __snake_case = len(a_ ) with self.assertRaises(a_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=a_ , timesteps=a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = self.scheduler_classes[0] __snake_case = self.get_scheduler_config() __snake_case = scheduler_class(**a_ ) __snake_case = [scheduler.config.num_train_timesteps] with self.assertRaises( a_ , msg=f"`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}" , ): scheduler.set_timesteps(timesteps=a_ )
680
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def A ( self : Any ): """simple docstring""" return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
680
1
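The scheduler tests above expect custom timesteps in strictly descending order; a short sketch of the validation they exercise:

def validate_custom_timesteps(timesteps):
    # mirror the check behind set_timesteps(timesteps=...)
    for earlier, later in zip(timesteps, timesteps[1:]):
        if later >= earlier:
            raise ValueError("`custom_timesteps` must be in descending order.")

validate_custom_timesteps([100, 87, 50, 1, 0])     # passes
# validate_custom_timesteps([100, 87, 50, 51, 0])  # raises ValueError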
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : Dict , a_ : int , a_ : int=None , a_ : Optional[Any]=None ): """simple docstring""" __snake_case = data __snake_case = previous __snake_case = next_node def __str__( self : int ): """simple docstring""" return f'''{self.data}''' def A ( self : Optional[Any] ): """simple docstring""" return self.data def A ( self : List[Any] ): """simple docstring""" return self.next def A ( self : int ): """simple docstring""" return self.previous class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : int ): """simple docstring""" __snake_case = head def __iter__( self : int ): """simple docstring""" return self def A ( self : Tuple ): """simple docstring""" if not self.current: raise StopIteration else: __snake_case = self.current.get_data() __snake_case = self.current.get_next() return value class SCREAMING_SNAKE_CASE__ : def __init__( self : List[Any] ): """simple docstring""" __snake_case = None # First node in list __snake_case = None # Last node in list def __str__( self : Optional[int] ): """simple docstring""" __snake_case = self.head __snake_case = [] while current is not None: nodes.append(current.get_data() ) __snake_case = current.get_next() return " ".join(str(a_ ) for node in nodes ) def __contains__( self : int , a_ : int ): """simple docstring""" __snake_case = self.head while current: if current.get_data() == value: return True __snake_case = current.get_next() return False def __iter__( self : int ): """simple docstring""" return LinkedListIterator(self.head ) def A ( self : List[Any] ): """simple docstring""" if self.head: return self.head.get_data() return None def A ( self : Tuple ): """simple docstring""" if self.tail: return self.tail.get_data() return None def A ( self : int , a_ : Node ): """simple docstring""" if self.head is None: __snake_case = node __snake_case = node else: self.insert_before_node(self.head , a_ ) def A ( self : List[Any] , a_ : Node ): """simple docstring""" if self.head is None: self.set_head(a_ ) else: self.insert_after_node(self.tail , a_ ) def A ( self : int , a_ : int ): """simple docstring""" __snake_case = Node(a_ ) if self.head is None: self.set_head(a_ ) else: self.set_tail(a_ ) def A ( self : Dict , a_ : Node , a_ : Node ): """simple docstring""" __snake_case = node __snake_case = node.previous if node.get_previous() is None: __snake_case = node_to_insert else: __snake_case = node_to_insert __snake_case = node_to_insert def A ( self : int , a_ : Node , a_ : Node ): """simple docstring""" __snake_case = node __snake_case = node.next if node.get_next() is None: __snake_case = node_to_insert else: __snake_case = node_to_insert __snake_case = node_to_insert def A ( self : List[Any] , a_ : int , a_ : int ): """simple docstring""" __snake_case = 1 __snake_case = Node(a_ ) __snake_case = self.head while node: if current_position == position: self.insert_before_node(a_ , a_ ) return current_position += 1 __snake_case = node.next self.insert_after_node(self.tail , a_ ) def A ( self : Any , a_ : int ): """simple docstring""" __snake_case = self.head while node: if node.get_data() == item: return node __snake_case = node.get_next() raise Exception("Node not found" ) def A ( self : List[Any] , a_ : Union[str, Any] ): """simple docstring""" if (node := self.get_node(a_ )) is not None: if node == self.head: __snake_case = self.head.get_next() if node == self.tail: __snake_case = self.tail.get_previous() self.remove_node_pointers(a_ ) @staticmethod def A ( a_ : Node ): """simple 
docstring""" if node.get_next(): __snake_case = node.previous if node.get_previous(): __snake_case = node.next __snake_case = None __snake_case = None def A ( self : List[Any] ): """simple docstring""" return self.head is None def __UpperCAmelCase ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
680
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device a : Optional[Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = generator.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def A ( self : Optional[int] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __snake_case = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
680
1
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 a : str = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + 
'''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + 
'''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, }


class SCREAMING_SNAKE_CASE__ :
    def __init__( self : Tuple , a_ : int = 14 ):
        """simple docstring"""
        if a_ not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[a_]["prime"]
        self.generator = primes[a_]["generator"]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key( self : Dict ):
        """simple docstring"""
        return hex(self.__private_key )[2:]

    def generate_public_key( self : List[Any] ):
        """simple docstring"""
        __snake_case = pow(self.generator , self.__private_key , self.prime )
        return hex(__snake_case )[2:]

    def is_valid_public_key( self : Tuple , a_ : int ):
        """simple docstring"""
        return (
            2 <= a_ <= self.prime - 2
            and pow(a_ , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key( self : Tuple , a_ : str ):
        """simple docstring"""
        __snake_case = int(a_ , base=16 )
        if not self.is_valid_public_key(__snake_case ):
            raise ValueError("Invalid public key" )
        __snake_case = pow(__snake_case , self.__private_key , self.prime )
        return shaaaa(str(__snake_case ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static( remote_public_key_str : int , prime : int ):
        """simple docstring"""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ):
        """simple docstring"""
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["prime"]
        if not SCREAMING_SNAKE_CASE__.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        __snake_case = pow(remote_public_key , local_private_key , prime )
        return shaaaa(str(__snake_case ).encode() ).hexdigest()


if __name__ == "__main__":
    import doctest
    doctest.testmod()
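A round-trip sketch for the key exchange above. The class is `DiffieHellman` in the un-anonymized source, and `shaaaa` is this corpus's anonymized spelling of the SHA-256 hash; the method names are the ones restored above:

alice = SCREAMING_SNAKE_CASE__()   # defaults to the 2048-bit group 14
bob = SCREAMING_SNAKE_CASE__()
# Each party publishes g^x mod p as hex; both derive the same digest of g^(ab) mod p.
assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(alice.generate_public_key())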
680
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : Any = get_logger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving model to {ckpt_dir}''' ) __snake_case = {"model": state_dict} dist_cp.save_state_dict( state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: 
__snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = ( os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) __snake_case = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , ) __snake_case = state_dict["model"] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: __snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: __snake_case = ( os.path.join(_UpperCAmelCase , 
F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) __snake_case = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , ) __snake_case = optim_state["optimizer"] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) __snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) optimizer.load_state_dict(_UpperCAmelCase )
680
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu a : List[Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : str ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A ( self : Optional[int] ): """simple docstring""" return 12 @property def A ( self : List[Any] ): """simple docstring""" return 12 @property def A ( self : Tuple ): """simple docstring""" return 32 @property def A ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def A ( self : str ): """simple docstring""" torch.manual_seed(0 ) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(a_ ) @property def A ( self : Tuple ): """simple docstring""" torch.manual_seed(0 ) __snake_case = 12 __snake_case = 12 __snake_case = { "attention_bias": True, "cross_attention_dim": 32, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 32, "sample_size": width, "activation_fn": "geglu-approximate", } __snake_case = TransformeraDModel(**a_ ) return model def A ( self : Tuple ): """simple docstring""" __snake_case = "cpu" __snake_case = self.dummy_vqvae __snake_case = self.dummy_text_encoder __snake_case = self.dummy_tokenizer __snake_case = self.dummy_transformer __snake_case = VQDiffusionScheduler(self.num_embed ) __snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=a_ ) __snake_case = VQDiffusionPipeline( vqvae=a_ , text_encoder=a_ , tokenizer=a_ , transformer=a_ , scheduler=a_ , learned_classifier_free_sampling_embeddings=a_ , ) __snake_case = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "teddy bear playing in the pool" __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = pipe([prompt] , generator=a_ , num_inference_steps=2 , output_type="np" ) __snake_case = output.images __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = pipe( [prompt] , generator=a_ , output_type="np" , return_dict=a_ , num_inference_steps=2 )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __snake_case = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 
1e-2 def A ( self : Tuple ): """simple docstring""" __snake_case = "cpu" __snake_case = self.dummy_vqvae __snake_case = self.dummy_text_encoder __snake_case = self.dummy_tokenizer __snake_case = self.dummy_transformer __snake_case = VQDiffusionScheduler(self.num_embed ) __snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=a_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __snake_case = VQDiffusionPipeline( vqvae=a_ , text_encoder=a_ , tokenizer=a_ , transformer=a_ , scheduler=a_ , learned_classifier_free_sampling_embeddings=a_ , ) __snake_case = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "teddy bear playing in the pool" __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = pipe([prompt] , generator=a_ , num_inference_steps=2 , output_type="np" ) __snake_case = output.images __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = pipe( [prompt] , generator=a_ , output_type="np" , return_dict=a_ , num_inference_steps=2 )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __snake_case = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : str ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ): """simple docstring""" __snake_case = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) __snake_case = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) __snake_case = pipeline.to(a_ ) pipeline.set_progress_bar_config(disable=a_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __snake_case = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a_ , output_type="np" , ) __snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
680
'''simple docstring'''


def __UpperCAmelCase ( number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest
    doctest.testmod()
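For example, three rounds starting from 1 (each round appends a trailing space):

print(__UpperCAmelCase(1, 3))   # -> "1 2 Fizz "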
680
1
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING a : str = logging.get_logger(__name__) @add_end_docstrings(_UpperCamelCase ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : Tuple , *a_ : List[str] , **a_ : Any ): """simple docstring""" super().__init__(*a_ , **a_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def A ( self : int , a_ : Tuple=None , a_ : List[str]=None , a_ : Dict=None ): """simple docstring""" __snake_case = {} __snake_case = {} if prompt is not None: __snake_case = prompt if generate_kwargs is not None: __snake_case = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __snake_case = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) __snake_case = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Optional[Any] , a_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a_ : Optional[int] ): """simple docstring""" return super().__call__(a_ , **a_ ) def A ( self : Tuple , a_ : Optional[int] , a_ : Tuple=None ): """simple docstring""" __snake_case = load_image(a_ ) if prompt is not None: if not isinstance(a_ , a_ ): raise ValueError( f'''Received an invalid text input, got - {type(a_ )} - but expected a single string. ''' "Note also that one single text can be provided for conditional image to text generation." 
) __snake_case = self.model.config.model_type if model_type == "git": __snake_case = self.image_processor(images=a_ , return_tensors=self.framework ) __snake_case = self.tokenizer(text=a_ , add_special_tokens=a_ ).input_ids __snake_case = [self.tokenizer.cls_token_id] + input_ids __snake_case = torch.tensor(a_ ).unsqueeze(0 ) model_inputs.update({"input_ids": input_ids} ) elif model_type == "pix2struct": __snake_case = self.image_processor(images=a_ , header_text=a_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __snake_case = self.image_processor(images=a_ , return_tensors=self.framework ) __snake_case = self.tokenizer(a_ , return_tensors=self.framework ) model_inputs.update(a_ ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: __snake_case = self.image_processor(images=a_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __snake_case = None return model_inputs def A ( self : List[str] , a_ : List[str] , a_ : Optional[int]=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"] , a_ ) and all(x is None for x in model_inputs["input_ids"] ) ): __snake_case = None if generate_kwargs is None: __snake_case = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __snake_case = model_inputs.pop(self.model.main_input_name ) __snake_case = self.model.generate(a_ , **a_ , **a_ ) return model_outputs def A ( self : List[str] , a_ : Any ): """simple docstring""" __snake_case = [] for output_ids in model_outputs: __snake_case = { "generated_text": self.tokenizer.decode( a_ , skip_special_tokens=a_ , ) } records.append(a_ ) return records
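A hedged usage sketch: in transformers this class is normally reached through the `pipeline` factory rather than instantiated directly. The checkpoint name below is only an illustrative image-captioning model, not one mandated by the code above:

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # example checkpoint
print(captioner("parrots.png"))   # -> [{"generated_text": "..."}]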
680
'''simple docstring'''


def __UpperCAmelCase ( number : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        ("1" + "0" * (binary_number_length - len(twos_complement_number )) + twos_complement_number)
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest
    doctest.testmod()
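For example, -5 needs three magnitude bits, so its minimal-width two's complement form is:

print(__UpperCAmelCase(-5))   # -> "0b1011"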
680
1
'''simple docstring'''


def solution( n : int = 2_00_00_00 ) -> int:
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(F'''{solution() = }''')
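A quick sanity check of the sieve: the primes below 10 are 2, 3, 5 and 7:

print(solution(10))   # -> 17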
680
'''simple docstring'''

from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm( number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator( number : int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark( ) -> None:
    def do_benchmark( number : int ) -> None:
        setup = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )

    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
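Both implementations agree; for example 25 is 0b11001, which has three set bits:

assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3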
680
1
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() a : Optional[Any] = logging.get_logger(__name__) a : List[Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } a : str = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ) -> Optional[int]: for attribute in key.split("." ): __snake_case = getattr(_UpperCAmelCase , _UpperCAmelCase ) if weight_type is not None: __snake_case = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape else: __snake_case = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case = value elif weight_type == "weight_g": __snake_case = value elif weight_type == "weight_v": __snake_case = value elif weight_type == "bias": __snake_case = value else: __snake_case = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case = [] __snake_case = fairseq_model.state_dict() __snake_case = hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case = False if "conv_layers" in name: load_conv_layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , ) __snake_case = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." 
)[0]: __snake_case = True if "*" in mapped_key: __snake_case = name.split(_UpperCAmelCase )[0].split("." )[-2] __snake_case = mapped_key.replace("*" , _UpperCAmelCase ) if "weight_g" in name: __snake_case = "weight_g" elif "weight_v" in name: __snake_case = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: __snake_case = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case = "weight" else: __snake_case = None set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) continue if not is_used: unused_weights.append(_UpperCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> int: __snake_case = full_name.split("conv_layers." )[-1] __snake_case = name.split("." ) __snake_case = int(items[0] ) __snake_case = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_UpperCAmelCase ) @torch.no_grad() def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=None ) -> Dict: # load the pre-trained checkpoints __snake_case = torch.load(_UpperCAmelCase ) __snake_case = WavLMConfigOrig(checkpoint["cfg"] ) __snake_case = WavLMOrig(_UpperCAmelCase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: __snake_case = WavLMConfig.from_pretrained(_UpperCAmelCase ) else: __snake_case = WavLMConfig() __snake_case = WavLMModel(_UpperCAmelCase ) recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase ) hf_wavlm.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": a : str = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') a : List[Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
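A hypothetical direct call mirroring the argparse wiring above; the name `convert_wavlm_checkpoint` comes from the script's own `__main__` block, and the paths are placeholders:

convert_wavlm_checkpoint(
    "/path/to/WavLM-Base.pt",   # fairseq/unilm checkpoint (checkpoint_path)
    "./wavlm-base-hf",          # output dir for save_pretrained (pytorch_dump_folder_path)
    None,                       # optional hf config.json (config_path)
)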
680
'''simple docstring''' import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch a : Dict = '''sshleifer/bart-tiny-random''' a : str = '''patrickvonplaten/t5-tiny-random''' @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : Union[str, Any] ): """simple docstring""" return AutoConfig.from_pretrained(a_ ) def A ( self : str ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) def A ( self : Dict ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def A ( self : Optional[int] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def A ( self : Dict ): """simple docstring""" with self.assertRaises(a_ ): create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
680
1
'''simple docstring'''


def __UpperCAmelCase ( density : float , bulk_modulus : float ) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest
    doctest.testmod()
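For example, water at roughly 20 °C (bulk modulus ~2.15e9 Pa, density ~998 kg/m^3), assuming the (density, bulk_modulus) argument order restored above:

print(__UpperCAmelCase(998, 2.15e9))   # ~1467.8 m/s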
680
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors a : Any = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """sequence-classification""" def __init__( self : List[str] , a_ : str ): """simple docstring""" if type(a_ ) == dict: __snake_case = Namespace(**a_ ) __snake_case = glue_output_modes[hparams.task] __snake_case = glue_tasks_num_labels[hparams.task] super().__init__(a_ , a_ , self.mode ) def A ( self : Union[str, Any] , **a_ : List[Any] ): """simple docstring""" return self.model(**a_ ) def A ( self : int , a_ : Optional[Any] , a_ : int ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __snake_case = self(**a_ ) __snake_case = outputs[0] __snake_case = self.trainer.lr_schedulers[0]["scheduler"] __snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def A ( self : List[str] ): """simple docstring""" __snake_case = self.hparams __snake_case = processors[args.task]() __snake_case = processor.get_labels() for mode in ["train", "dev"]: __snake_case = self._feature_file(a_ ) if os.path.exists(a_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , a_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) __snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) __snake_case = convert_examples_to_features( a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , a_ ) torch.save(a_ , a_ ) def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ): """simple docstring""" __snake_case = "dev" if mode == "test" else mode __snake_case = self._feature_file(a_ ) logger.info("Loading features from cached file %s" , a_ ) __snake_case = torch.load(a_ ) __snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , ) def A ( self : int , a_ : List[str] , a_ : Tuple ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", 
"albert"] else None __snake_case = self(**a_ ) __snake_case , __snake_case = outputs[:2] __snake_case = logits.detach().cpu().numpy() __snake_case = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A ( self : Dict , a_ : Optional[int] ): """simple docstring""" __snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() __snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __snake_case = np.argmax(a_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": __snake_case = np.squeeze(a_ ) __snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 ) __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )} __snake_case = dict(results.items() ) __snake_case = results return ret, preds_list, out_label_list def A ( self : Tuple , a_ : list ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A ( self : int , a_ : Tuple ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A ( a_ : str , a_ : Any ): """simple docstring""" BaseTransformer.add_model_specific_args(a_ , a_ ) parser.add_argument( "--max_seq_length" , default=128 , type=a_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = argparse.ArgumentParser() add_generic_args(_UpperCAmelCase , os.getcwd() ) __snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) __snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __snake_case = os.path.join( "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __snake_case = GLUETransformer(_UpperCAmelCase ) __snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) ) __snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCAmelCase ) if __name__ == "__main__": main()
680
1
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a : Dict = logging.get_logger(__name__)

a : List[Any] = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}


class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    __SCREAMING_SNAKE_CASE = """mgp-str"""

    def __init__(
        self : Any ,
        image_size : Optional[Any] = [32, 128] ,
        patch_size : Any = 4 ,
        num_channels : Dict = 3 ,
        max_token_length : List[Any] = 27 ,
        num_character_labels : Any = 38 ,
        num_bpe_labels : int = 50_257 ,
        num_wordpiece_labels : str = 30_522 ,
        hidden_size : Optional[Any] = 768 ,
        num_hidden_layers : List[Any] = 12 ,
        num_attention_heads : Any = 12 ,
        mlp_ratio : Tuple = 4.0 ,
        qkv_bias : Optional[Any] = True ,
        distilled : Any = False ,
        layer_norm_eps : Any = 1e-5 ,
        drop_rate : Any = 0.0 ,
        attn_drop_rate : List[str] = 0.0 ,
        drop_path_rate : Optional[int] = 0.0 ,
        output_aa_attentions : str = False ,
        initializer_range : str = 0.02 ,
        **a_ : Optional[Any] ,
    ):
        """simple docstring"""
        super().__init__(**a_ )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
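A usage sketch, assuming the defaults above (this class is `MgpstrConfig` in the un-anonymized transformers source):

config = SCREAMING_SNAKE_CASE__()
print(config.max_token_length)                          # -> 27
print(config.hidden_size, config.num_attention_heads)   # -> 768 12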
680
'''simple docstring'''

import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( dataset_size : Optional[Any] , input_in_memory_max_size : List[Any] , monkeypatch : str ) -> int:
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
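The behavior under test, sketched directly: with `datasets.config.IN_MEMORY_MAX_SIZE` left at its default of 0, no dataset counts as small:

from datasets.utils.info_utils import is_small_dataset

print(is_small_dataset(4_00 * 2**20))   # -> False under the default config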
680
1
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : Any , a_ : Tuple=0.01 , a_ : Tuple=1_000 ): """simple docstring""" __snake_case = p_stop __snake_case = max_length def __iter__( self : Tuple ): """simple docstring""" __snake_case = 0 __snake_case = False while not stop and count < self.max_length: yield count count += 1 __snake_case = random.random() < self.p_stop class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : List[str] , a_ : Tuple , a_ : List[Any] , a_ : List[Any]=False , a_ : List[str]=True ): """simple docstring""" __snake_case = [ BatchSamplerShard(a_ , 2 , a_ , split_batches=a_ , even_batches=a_ ) for i in range(2 ) ] __snake_case = [list(a_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(a_ ) for shard in batch_sampler_shards] , [len(a_ ) for e in expected] ) self.assertListEqual(a_ , a_ ) def A ( self : List[Any] ): """simple docstring""" __snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a_ , a_ ) __snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a_ ) # Expected shouldn't change self.check_batch_sampler_shards(a_ , a_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(a_ , a_ ) __snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(a_ , a_ ) __snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(a_ , a_ ) __snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ ) # Check the shards when the dataset is very small. 
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a_ ) __snake_case = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(a_ , a_ ) __snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a_ ) __snake_case = [[], []] self.check_batch_sampler_shards(a_ , a_ ) def A ( self : str ): """simple docstring""" __snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) __snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a_ ) # Expected shouldn't change self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) # Check the shards when the dataset is not a round multiple of batch size. __snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) __snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) __snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) # Check the shards when the dataset is very small. __snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a_ ) __snake_case = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) __snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a_ ) __snake_case = [[], []] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) __snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=a_ ) # Expected shouldn't change self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) __snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) __snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) __snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=a_ ) __snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) # Check the shards when the dataset is very small. __snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a_ ) __snake_case = [[[0, 1]], []] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) __snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=a_ ) __snake_case = [[], []] self.check_batch_sampler_shards(a_ , a_ , even_batches=a_ ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) __snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=a_ ) # Expected shouldn't change self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) # Check the shards when the dataset is not a round multiple of batch size. __snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) __snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) __snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=a_ ) __snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) # Check the shards when the dataset is very small. 
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a_ ) __snake_case = [[[0, 1]], []] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) __snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=a_ ) __snake_case = [[], []] self.check_batch_sampler_shards(a_ , a_ , split_batches=a_ , even_batches=a_ ) def A ( self : int ): """simple docstring""" __snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __snake_case = [BatchSamplerShard(a_ , 2 , a_ , even_batches=a_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def A ( self : str , a_ : int , a_ : str , a_ : Union[str, Any] , a_ : int=False , a_ : Dict=2 , a_ : List[str]=False ): """simple docstring""" random.seed(a_ ) __snake_case = list(a_ ) __snake_case = [ IterableDatasetShard( a_ , batch_size=a_ , drop_last=a_ , num_processes=a_ , process_index=a_ , split_batches=a_ , ) for i in range(a_ ) ] __snake_case = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(a_ ) iterable_dataset_lists.append(list(a_ ) ) __snake_case = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __snake_case = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(a_ ) , len(a_ ) ) self.assertTrue(len(a_ ) % shard_batch_size == 0 ) __snake_case = [] for idx in range(0 , len(a_ ) , a_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(a_ ) < len(a_ ): reference += reference self.assertListEqual(a_ , reference[: len(a_ )] ) def A ( self : Tuple ): """simple docstring""" __snake_case = 42 __snake_case = RandomIterableDataset() self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) # Edge case with a very small dataset __snake_case = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) self.check_iterable_dataset_shards(a_ , a_ , batch_size=4 , drop_last=a_ , split_batches=a_ ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=a_ ) __snake_case = SkipBatchSampler(a_ , 2 ) self.assertListEqual(list(a_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def A ( self : List[Any] ): """simple docstring""" __snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def A ( self : List[Any] ): """simple docstring""" __snake_case = DataLoader(list(range(16 ) ) , batch_size=4 ) __snake_case = 
skip_first_batches(a_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def A ( self : Tuple ): """simple docstring""" __snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(a_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def A ( self : Any ): """simple docstring""" Accelerator() __snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(a_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(a_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
680
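A minimal usage sketch of the sharding exercised by the tests above; it assumes accelerate's `BatchSamplerShard` (the same class the tests construct) and shows how two processes see disjoint slices of one sampler.

# Minimal sketch of the sharding behavior tested above, using accelerate's
# BatchSamplerShard with the same constructor arguments as the tests.
from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [
    BatchSamplerShard(batch_sampler, num_processes=2, process_index=i)
    for i in range(2)
]
for i, shard in enumerate(shards):
    # Each process receives every other batch: rank 0 gets [[0, 1, 2], [6, 7, 8], ...]
    print(f"process {i}: {list(shard)}")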
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float: if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Length must be positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float: if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Length must be positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
680
1
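The two functions above are the closed-form surface area and volume of a regular dodecahedron; a quick standalone numeric check (no assumptions beyond the standard library):

# Standalone numeric check of the closed-form dodecahedron formulas above.
from math import isclose, sqrt

edge = 2.0
surface_area = 3 * sqrt(25 + 10 * sqrt(5)) * edge**2   # ~20.6457 * edge^2
volume = (15 + 7 * sqrt(5)) / 4 * edge**3              # ~7.6631 * edge^3
print(round(surface_area, 4), round(volume, 4))        # 82.5829 61.305
assert isclose(volume, 7.663118960624632 * edge**3)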
'''simple docstring''' # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter a : Optional[int] = logging.get_logger(__name__) a : Dict[Optional[str], Type[Formatter]] = {} a : Dict[Optional[str], str] = {} a : Dict[Optional[str], Exception] = {} def __UpperCAmelCase ( _UpperCAmelCase : type , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[List[str]] = None , ) -> str: __snake_case = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) __snake_case = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) __snake_case = format_type def __UpperCAmelCase ( _UpperCAmelCase : Exception , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[List[str]] = None ) -> int: __snake_case = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __snake_case = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: a : List[Any] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: a : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: a : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def __UpperCAmelCase ( _UpperCAmelCase : Optional[str] ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __UpperCAmelCase ( _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : int ) -> Formatter: __snake_case = get_format_type_from_alias(_UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**_UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'''' )
680
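The registry above is what powers alias lookup for `Dataset.set_format`; a short sketch, assuming the `datasets` library is installed and exposes `get_formatter` from `datasets.formatting` as defined here:

# Sketch of alias resolution through the registry above: "np" resolves to
# "numpy", which maps to NumpyFormatter (assumes `datasets` is installed).
from datasets.formatting import get_formatter

formatter = get_formatter("np")
print(type(formatter).__name__)  # NumpyFormatter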
'''simple docstring''' from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a : Any = 6_378_137.0 a : List[Any] = 6_356_752.314_245 a : Dict = 6_378_137 def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: __snake_case = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values __snake_case = (b_lata + b_lata) / 2 __snake_case = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2) __snake_case = cos(sigma / 2 ) ** 2 __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2) __snake_case = sin(sigma / 2 ) ** 2 __snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
680
1
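A usage sketch for the distance function above. The dump masks the identifier, so the upstream name `lamberts_ellipsoidal_distance` is assumed; the coordinates are illustrative (lat, lon) pairs and only the call pattern matters.

# Usage sketch (assumes the function above keeps its upstream name,
# lamberts_ellipsoidal_distance; this dump masks identifiers).
san_francisco = (37.774856, -122.424227)   # illustrative (lat, lon)
yosemite = (37.864742, -119.537521)
meters = lamberts_ellipsoidal_distance(*san_francisco, *yosemite)
print(f"{meters / 1000:.1f} km")           # on the order of a few hundred km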
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : int ): """simple docstring""" __snake_case = n __snake_case = [None] * self.n __snake_case = 0 # index of the first element __snake_case = 0 __snake_case = 0 def __len__( self : Optional[Any] ): """simple docstring""" return self.size def A ( self : Optional[int] ): """simple docstring""" return self.size == 0 def A ( self : int ): """simple docstring""" return False if self.is_empty() else self.array[self.front] def A ( self : Any , a_ : List[str] ): """simple docstring""" if self.size >= self.n: raise Exception("QUEUE IS FULL" ) __snake_case = data __snake_case = (self.rear + 1) % self.n self.size += 1 return self def A ( self : Dict ): """simple docstring""" if self.size == 0: raise Exception("UNDERFLOW" ) __snake_case = self.array[self.front] __snake_case = None __snake_case = (self.front + 1) % self.n self.size -= 1 return temp
680
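The heart of the class above is the modulo arithmetic on `front` and `rear`; shown standalone so the wrap-around is visible:

# The ring-buffer arithmetic used above, standalone: indices wrap with modulo
# instead of shifting stored elements.
n = 3
array, front, rear, size = [None] * n, 0, 0, 0
for data in ("a", "b", "c"):          # three enqueues fill the buffer
    array[rear] = data
    rear = (rear + 1) % n             # rear wraps back to 0 after the last slot
    size += 1
first = array[front]                  # dequeue: read, clear, advance front
array[front] = None
front = (front + 1) % n
size -= 1
print(first, array, front, rear)      # a [None, 'b', 'c'] 1 0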
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray: # For applying gaussian function for each element in matrix. __snake_case = math.sqrt(_UpperCAmelCase ) __snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray: __snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase ): __snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray: __snake_case = np.zeros(img.shape ) __snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase ) __snake_case , __snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] __snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase ) __snake_case = val return imga def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple: __snake_case = args[1] if args[1:] else "../image_data/lena.jpg" __snake_case = float(args[2] ) if args[2:] else 1.0 __snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: __snake_case = int(args[4] ) __snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: __snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a , a , a , a : Tuple = parse_args(sys.argv) a : Tuple = cva.imread(filename, 0) cva.imshow('''input image''', img) a : Dict = img / 255 a : str = out.astype('''float32''') a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a : Dict = out * 255 a : List[str] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
680
1
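The spatial kernel built by the helper above (upstream name `get_gauss_kernel`; identifiers are masked in this dump) reduces to a few lines of NumPy:

# Standalone sketch of the spatial Gaussian kernel construction above.
import math
import numpy as np

kernel_size, sigma = 3, 1.0
dist = np.zeros((kernel_size, kernel_size))
for i in range(kernel_size):
    for j in range(kernel_size):
        # Euclidean distance of each cell from the kernel center
        dist[i, j] = math.sqrt((i - kernel_size // 2) ** 2 + (j - kernel_size // 2) ** 2)
cons = 1 / (sigma * math.sqrt(2 * math.pi))
kernel = cons * np.exp(-((dist / sigma) ** 2) * 0.5)
print(kernel.round(3))  # peaks at the center, decays with distance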
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( ) -> Dict: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case = [1, 2, 3] with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 ) with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]: __snake_case = [1, 2] __snake_case = {"a": 1, "b": 2} __snake_case = {"a": [1, 2], "b": [3, 4]} __snake_case = {"a": {"1": 1}, "b": 2} __snake_case = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case = [2, 3] __snake_case = {"a": 2, "b": 3} __snake_case = {"a": [2, 3], "b": [4, 5]} __snake_case = {"a": {"1": 2}, "b": 3} __snake_case = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
680
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ): """simple docstring""" __snake_case = name __snake_case = value __snake_case = weight def __repr__( self : Optional[int] ): """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def A ( self : Any ): """simple docstring""" return self.value def A ( self : str ): """simple docstring""" return self.name def A ( self : int ): """simple docstring""" return self.weight def A ( self : Tuple ): """simple docstring""" return self.value / self.weight def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]: __snake_case = [] for i in range(len(_UpperCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int: __snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase ) __snake_case = [] __snake_case , __snake_case = 0.0, 0.0 for i in range(len(_UpperCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __UpperCAmelCase ( ) -> Optional[Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
680
1
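The strategy above is a value-density greedy with a weight budget; here it is in standalone form with toy data:

# The value-density greedy from above, standalone: sort by value/weight and
# take items while they still fit in the budget.
items = [("burger", 80, 40), ("pizza", 100, 60), ("salad", 30, 10)]  # (name, value, weight)
budget = 70
chosen, total_value, total_weight = [], 0.0, 0.0
for name, value, weight in sorted(items, key=lambda t: t[1] / t[2], reverse=True):
    if total_weight + weight <= budget:
        chosen.append(name)
        total_weight += weight
        total_value += value
print(chosen, total_value)  # ['salad', 'burger'] 110.0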
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = KandinskyVaaControlnetPipeline __SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""] __SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""] __SCREAMING_SNAKE_CASE = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] __SCREAMING_SNAKE_CASE = False @property def A ( self : str ): """simple docstring""" return 32 @property def A ( self : Any ): """simple docstring""" return 32 @property def A ( self : Union[str, Any] ): """simple docstring""" return self.time_input_dim @property def A ( self : Union[str, Any] ): """simple docstring""" return self.time_input_dim * 4 @property def A ( self : Any ): """simple docstring""" return 100 @property def A ( self : Any ): """simple docstring""" torch.manual_seed(0 ) __snake_case = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __snake_case = UNetaDConditionModel(**a_ ) return model @property def A ( self : Dict ): """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def A ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = VQModel(**self.dummy_movq_kwargs ) return model def A ( self : Tuple ): """simple docstring""" __snake_case = self.dummy_unet __snake_case = self.dummy_movq __snake_case = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=a_ , set_alpha_to_one=a_ , steps_offset=1 , prediction_type="epsilon" , thresholding=a_ , ) __snake_case = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def A ( self : Optional[Any] , a_ : Dict , a_ : List[str]=0 ): """simple docstring""" __snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a_ ) 
).to(a_ ) __snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( a_ ) # create hint __snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(a_ ) ).to(a_ ) if str(a_ ).startswith("mps" ): __snake_case = torch.manual_seed(a_ ) else: __snake_case = torch.Generator(device=a_ ).manual_seed(a_ ) __snake_case = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def A ( self : Any ): """simple docstring""" __snake_case = "cpu" __snake_case = self.get_dummy_components() __snake_case = self.pipeline_class(**a_ ) __snake_case = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = pipe(**self.get_dummy_inputs(a_ ) ) __snake_case = output.images __snake_case = pipe( **self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Dict ): """simple docstring""" __snake_case = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) __snake_case = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) __snake_case = torch.from_numpy(np.array(a_ ) ).float() / 255.0 __snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __snake_case = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(a_ ) __snake_case = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) __snake_case = pipeline.to(a_ ) pipeline.set_progress_bar_config(disable=a_ ) __snake_case = "A robot, 4k photo" __snake_case = torch.Generator(device="cuda" ).manual_seed(0 ) __snake_case , __snake_case = pipe_prior( a_ , generator=a_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __snake_case = torch.Generator(device="cuda" ).manual_seed(0 ) __snake_case = pipeline( image_embeds=a_ , negative_image_embeds=a_ , hint=a_ , generator=a_ , num_inference_steps=100 , output_type="np" , ) __snake_case = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(a_ , a_ )
680
'''simple docstring''' import os from math import logaa def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int: __snake_case = 0 __snake_case = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ): __snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) ) if x * logaa(_UpperCAmelCase ) > largest: __snake_case = x * logaa(_UpperCAmelCase ) __snake_case = i + 1 return result if __name__ == "__main__": print(solution())
680
1
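The comparison above relies on the identity log(a^b) = b * log(a), so only logarithms ever have to be computed; a tiny check with small numbers:

# Why comparing x * log10(base) works: log10(base**exp) == exp * log10(base),
# so huge powers never need to be evaluated.
from math import log10

print(11 * log10(2) > 7 * log10(3))  # False: 2**11 = 2048 < 3**7 = 2187
print(2 ** 11 < 3 ** 7)              # True, agreeing with the log comparison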
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a : int = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys a : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
680
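The `_LazyModule` used above defers every submodule import until an attribute is first touched. A simplified stand-in for the pattern (this is a sketch, not the transformers implementation):

# Simplified sketch of the lazy-import pattern behind _LazyModule: attribute
# access triggers the real submodule import.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule: [attr, ...]}

    def __getattr__(self, attr: str):
        for submodule, attrs in self._import_structure.items():
            if attr in attrs:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")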
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_logger(__name__) a : Dict = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a : Any = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } a : Optional[int] = { '''facebook/blenderbot_small-90M''': 512, } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ): """simple docstring""" super().__init__( ByteLevelBPETokenizer( vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , ) __snake_case = add_prefix_space def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ): """simple docstring""" __snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
680
1
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : Any = get_logger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving model to {ckpt_dir}''' ) __snake_case = {"model": state_dict} dist_cp.save_state_dict( state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: 
__snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = ( os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) __snake_case = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , ) __snake_case = state_dict["model"] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: __snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = None # below check should work but currently it isn't working (mostly pytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: __snake_case = ( os.path.join(_UpperCAmelCase ,
F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) __snake_case = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , ) __snake_case = optim_state["optimizer"] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) __snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) optimizer.load_state_dict(_UpperCAmelCase )
680
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : str = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """dandelin/vilt-b32-finetuned-vqa""" __SCREAMING_SNAKE_CASE = ( """This is a tool that answers a question about an image. It takes an input named `image` which should be the """ """image containing the information, as well as a `question` which should be the question in English. It """ """returns a text that is the answer to the question.""" ) __SCREAMING_SNAKE_CASE = """image_qa""" __SCREAMING_SNAKE_CASE = AutoProcessor __SCREAMING_SNAKE_CASE = AutoModelForVisualQuestionAnswering __SCREAMING_SNAKE_CASE = ["""image""", """text"""] __SCREAMING_SNAKE_CASE = ["""text"""] def __init__( self : str , *a_ : Dict , **a_ : Dict ): """simple docstring""" requires_backends(self , ["vision"] ) super().__init__(*a_ , **a_ ) def A ( self : Tuple , a_ : "Image" , a_ : str ): """simple docstring""" return self.pre_processor(a_ , a_ , return_tensors="pt" ) def A ( self : Dict , a_ : Union[str, Any] ): """simple docstring""" with torch.no_grad(): return self.model(**a_ ).logits def A ( self : List[Any] , a_ : List[str] ): """simple docstring""" __snake_case = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
680
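A hypothetical usage sketch for the tool above. The dump masks the class name; upstream it is transformers' `ImageQuestionAnsweringTool`, and `"photo.jpg"` is a placeholder path. `__call__` runs the encode -> forward -> decode flow defined above.

# Hypothetical usage sketch (assumes the class above is transformers'
# ImageQuestionAnsweringTool; "photo.jpg" is a placeholder path).
from PIL import Image

tool = ImageQuestionAnsweringTool()   # loads the default ViLT checkpoint on first use
image = Image.open("photo.jpg")
print(tool(image, "How many animals are in the picture?"))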
'''simple docstring''' # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainer's args. # # It then prints a report once in github format with all the information that needs to be shared # with others and a second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is (as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers a : Optional[Any] = float('''nan''') class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Optional[int] ): """simple docstring""" __snake_case = sys.stdout __snake_case = open(a_ , "a" ) def __getattr__( self : str , a_ : List[Any] ): """simple docstring""" return getattr(self.stdout , a_ ) def A ( self : Union[str, Any] , a_ : List[Any] ): """simple docstring""" self.stdout.write(a_ ) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) ) def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]: __snake_case = [] # deal with critical env vars __snake_case = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: __snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) __snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(_UpperCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __snake_case = [] __snake_case = "" while len(_UpperCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCAmelCase ) __snake_case = "" return "\\\n".join(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple: # unwrap multi-line input __snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own __snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir __snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , ) __snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams __snake_case = variation.replace(" " , "-" ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f: f.write(result.stdout ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f: __snake_case = json.load(_UpperCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict: __snake_case = [] __snake_case = [] __snake_case = F'''{id}: {variation:<{longest_variation_len}}''' __snake_case = F'''{preamble}: ''' __snake_case = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ): __snake_case = process_run_single( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = single_run_metrics[target_metric_key] if not math.isnan(_UpperCAmelCase ): metrics.append(_UpperCAmelCase ) results.append(_UpperCAmelCase ) outcome += "✓" else: outcome += "✘" __snake_case = F'''\33[2K\r{outcome}''' if len(_UpperCAmelCase ) > 0: __snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __snake_case = round(mean_metrics[target_metric_key] , 2 ) __snake_case = F'''{outcome} {mean_target}''' if len(_UpperCAmelCase ) > 1: results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}''' print(_UpperCAmelCase ) __snake_case = variation return mean_metrics else: print(_UpperCAmelCase ) return {variation_key: variation, target_metric_key: nan} def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]: __snake_case = pd.DataFrame(_UpperCAmelCase ) __snake_case = "variation" __snake_case = "diff_%" __snake_case = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCAmelCase 
): # as a fallback, use the minimal value as the sentinel __snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCAmelCase ): __snake_case = df.apply( lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns __snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys] __snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols # capitalize __snake_case = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" ) __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" ) __snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] print("\n\n".join(_UpperCAmelCase ) ) def __UpperCAmelCase ( ) -> Dict: __snake_case = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , ) parser.add_argument( "--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) __snake_case = parser.parse_args() __snake_case = args.output_dir Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) __snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase ) # split each dimension into its --foo variations __snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) ) __snake_case = max(len(_UpperCAmelCase ) for x in variations ) # split wanted keys __snake_case = args.report_metric_keys.split() # capture prints into a log file for convenience __snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) __snake_case = Tee(_UpperCAmelCase ) print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' ) print(F'''Base command: {" ".join(_UpperCAmelCase )}''' ) __snake_case = "variation" __snake_case = [] for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ): __snake_case = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) ) process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase ) if __name__ == "__main__": main()
680
1
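The benchmark expands `--variations` into a cartesian product exactly as its `main()` does; a standalone sketch of that expansion step:

# How --variations is expanded above: a cartesian product of the
# per-dimension options, stripped of extra whitespace.
import itertools
import re

dims = [list(map(str.strip, re.split(r"\|", v))) for v in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']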
'''simple docstring''' from math import sqrt def __UpperCAmelCase ( _UpperCAmelCase : int ) -> bool: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number >= 0 ), "'number' must been an int and positive" __snake_case = True # 0 and 1 are not primes. if number <= 1: __snake_case = False for divisor in range(2 , int(round(sqrt(_UpperCAmelCase ) ) ) + 1 ): # if 'number' is divisible by 'divisor' then set 'status' # to False and break the loop. if number % divisor == 0: __snake_case = False break # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'status' must been from type bool" return status def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Dict: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __snake_case = list(range(2 , n + 1 ) ) __snake_case = [] # this list will be returned. # actual sieve of Eratosthenes for i in range(len(_UpperCAmelCase ) ): for j in range(i + 1 , len(_UpperCAmelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __snake_case = 0 # filters actual prime numbers. __snake_case = [x for x in begin_list if x != 0] # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list" return ans def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Any: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2" __snake_case = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_UpperCAmelCase ): ans.append(_UpperCAmelCase ) # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list" return ans def __UpperCAmelCase ( _UpperCAmelCase : str ) -> List[str]: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and number >= 0, "'number' must been an int and >= 0" __snake_case = [] # this list will be returned by the function. # potential prime number factors.
__snake_case = 2 __snake_case = number if number == 0 or number == 1: ans.append(_UpperCAmelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_UpperCAmelCase ): while quotient != 1: if is_prime(_UpperCAmelCase ) and (quotient % factor == 0): ans.append(_UpperCAmelCase ) quotient //= factor else: factor += 1 else: ans.append(_UpperCAmelCase ) # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list" return ans def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Any: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number >= 0 ), "'number' must been an int and >= 0" __snake_case = 0 # prime factorization of 'number' __snake_case = prime_factorization(_UpperCAmelCase ) __snake_case = max(_UpperCAmelCase ) # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type int" return ans def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Any: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number >= 0 ), "'number' must been an int and >= 0" __snake_case = 0 # prime factorization of 'number' __snake_case = prime_factorization(_UpperCAmelCase ) __snake_case = min(_UpperCAmelCase ) # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type int" return ans def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Union[str, Any]: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _UpperCAmelCase ), "compare must been from type bool" return number % 2 == 0 def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> int: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _UpperCAmelCase ), "compare must been from type bool" return number % 2 != 0 def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> int: assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (number > 2) and is_even(_UpperCAmelCase ) ), "'number' must been an int, even and > 2" __snake_case = [] # this list will be returned # creates a list of prime numbers between 2 up to 'number' __snake_case = get_prime_numbers(_UpperCAmelCase ) __snake_case = len(_UpperCAmelCase ) # run variable for while-loops. __snake_case = 0 __snake_case = None # exit variable, to break out of the loops __snake_case = True while i < len_pn and loop: __snake_case = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __snake_case = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (len(_UpperCAmelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contain two primes. And sum of elements must been eq 'number'" return ans def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int: assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integers."
__snake_case = 0 while numbera != 0: __snake_case = numbera % numbera __snake_case = numbera __snake_case = rest # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( numbera >= 0 ), "'number' must be of type int and positive" return numbera def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> List[str]: assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must be positive integers." __snake_case = 1 # actual answer that will be returned. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __snake_case = prime_factorization(_UpperCAmelCase ) __snake_case = prime_factorization(_UpperCAmelCase ) elif numbera == 1 or numbera == 1: __snake_case = [] __snake_case = [] __snake_case = max(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = 0 __snake_case = 0 __snake_case = [] # numbers captured in both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __snake_case = prime_fac_a.count(_UpperCAmelCase ) __snake_case = prime_fac_a.count(_UpperCAmelCase ) for _ in range(max(_UpperCAmelCase , _UpperCAmelCase ) ): ans *= n else: __snake_case = prime_fac_a.count(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ): ans *= n done.append(_UpperCAmelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __snake_case = prime_fac_a.count(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ): ans *= n done.append(_UpperCAmelCase ) # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( ans >= 0 ), "'ans' must be of type int and positive" return ans def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> Any: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'number' must be an int and >= 0" __snake_case = 0 __snake_case = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts up to the next number # if ans is not prime then # run on to the next prime number. while not is_prime(_UpperCAmelCase ): ans += 1 # precondition assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and is_prime( _UpperCAmelCase ), "'ans' must be a prime number of type int" return ans def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: assert ( is_prime(_UpperCAmelCase ) and is_prime(_UpperCAmelCase ) and (p_number_a < p_number_a) ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'" __snake_case = p_number_a + 1 # jump to the next number __snake_case = [] # this list will be returned. # if number is not prime then # fetch the next prime number. while not is_prime(_UpperCAmelCase ): number += 1 while number < p_number_a: ans.append(_UpperCAmelCase ) number += 1 # fetch the next prime number. while not is_prime(_UpperCAmelCase ): number += 1 # precondition assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ans[0] != p_number_a and ans[len(_UpperCAmelCase ) - 1] != p_number_a ), "'ans' must be a list that excludes the arguments" # 'ans' contains neither 'pNumber1' nor 'pNumber2'! return ans def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> str: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 1), "'n' must be an int and >= 1" __snake_case = [] # will be returned.
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_UpperCAmelCase ) # precondition assert ans[0] == 1 and ans[len(_UpperCAmelCase ) - 1] == n, "Error in function getDivisors(...)" return ans def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Optional[int]: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and ( number > 1 ), "'number' must be an int and > 1" __snake_case = get_divisors(_UpperCAmelCase ) # precondition assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (divisors[0] == 1) and (divisors[len(_UpperCAmelCase ) - 1] == number) ), "Error in helper function getDivisors(...)" # sum all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (denominator != 0) ), "The arguments must be of type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __snake_case = gcd(abs(_UpperCAmelCase ) , abs(_UpperCAmelCase ) ) # precondition assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Optional[Any]: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must be an int and >= 0" __snake_case = 1 # this will be returned. for factor in range(1 , n + 1 ): ans *= factor return ans def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> Optional[Any]: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must be an int and >= 0" __snake_case = 0 __snake_case = 1 __snake_case = 1 # this will be returned for _ in range(n - 1 ): __snake_case = ans ans += fiba __snake_case = tmp return ans
680
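The definitions in the record above are stored with obfuscated names, but the internal call sites still reveal the original helpers (is_prime, get_prime_numbers, prime_factorization, gcd, get_divisors). A minimal usage sketch, assuming those original names and a hypothetical import path:

from maths.primelib import is_prime, prime_factorization, gcd, get_divisors  # hypothetical path

assert is_prime(97)
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2^3 * 3^2 * 5
assert gcd(48, 36) == 12
assert get_divisors(12) == [1, 2, 3, 4, 6, 12]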
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( ) -> Dict: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case = [1, 2, 3] with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 ) with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]: __snake_case = [1, 2] __snake_case = {"a": 1, "b": 2} __snake_case = {"a": [1, 2], "b": [3, 4]} __snake_case = {"a": {"1": 1}, "b": 2} __snake_case = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case = [2, 3] __snake_case = {"a": 2, "b": 3} __snake_case = {"a": [2, 3], "b": [4, 5]} __snake_case = {"a": {"1": 2}, "b": 3} __snake_case = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
680
1
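A hedged sketch of the API the test above exercises: map_nested applies a picklable function over arbitrarily nested lists and dicts, and parallel_backend swaps the default multiprocessing pool for joblib-spark (the joblibspark package must be installed):

from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested

def add_one(i):  # defined at module level so it stays picklable
    return i + 1

with parallel_backend("spark"):
    result = map_nested(add_one, {"a": [1, 2], "b": 3}, num_proc=2)
# result == {"a": [2, 3], "b": 4}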
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : list[list] ) -> list[list]: __snake_case = current_set.copy() for row_index, row in enumerate(_UpperCAmelCase ): __snake_case = row[0] for column_index, column in enumerate(_UpperCAmelCase ): if magnitude == 0: __snake_case = column continue __snake_case = column / magnitude # Subtract to cancel term __snake_case = current_set[0] __snake_case = [first_row] __snake_case = current_set[1::] for row in current_set: __snake_case = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(_UpperCAmelCase ) continue for column_index in range(len(_UpperCAmelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(_UpperCAmelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __snake_case = final_set[0] __snake_case = [] __snake_case = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __snake_case = simplify(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , _UpperCAmelCase ) __snake_case = resultant return final_set def __UpperCAmelCase ( _UpperCAmelCase : list[list] ) -> list: if len(_UpperCAmelCase ) == 0: raise IndexError("solve_simultaneous() requires n lists of length n+1" ) __snake_case = len(_UpperCAmelCase ) + 1 if any(len(_UpperCAmelCase ) != _length for item in equations ): raise IndexError("solve_simultaneous() requires n lists of length n+1" ) for row in equations: if any(not isinstance(_UpperCAmelCase , (int, float) ) for column in row ): raise ValueError("solve_simultaneous() requires lists of integers" ) if len(_UpperCAmelCase ) == 1: return [equations[0][-1] / equations[0][0]] __snake_case = equations.copy() if any(0 in row for row in data_set ): __snake_case = data_set.copy() __snake_case = [] for row_index, row in enumerate(_UpperCAmelCase ): if 0 not in row: __snake_case = data_set.pop(_UpperCAmelCase ) break if not full_row: raise ValueError("solve_simultaneous() requires at least 1 full equation" ) data_set.insert(0 , _UpperCAmelCase ) __snake_case = data_set.copy() __snake_case = simplify(_UpperCAmelCase ) __snake_case = simplified[::-1] __snake_case = [] for row in simplified: __snake_case = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __snake_case = row.copy()[: len(_UpperCAmelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(_UpperCAmelCase ) == 0: solutions.append(0 ) continue __snake_case = temp_row[1::] __snake_case = temp_row[::-1] for column_index, column in enumerate(_UpperCAmelCase ): current_solution -= column * solutions[column_index] solutions.append(_UpperCAmelCase ) __snake_case = [] for item in solutions: final.append(float(round(_UpperCAmelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() a : List[Any] = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
680
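Beyond the five-variable demo in the __main__ block above, a smaller worked example (assuming the de-obfuscated solve_simultaneous): each row holds one equation's coefficients followed by its constant, so x + y = 3 and 2x - y = 0 become:

print(solve_simultaneous([[1, 1, 3], [2, -1, 0]]))  # expected: [1.0, 2.0]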
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : int = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """mobilenet_v2""" def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ): """simple docstring""" super().__init__(**a_ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) __snake_case = num_channels __snake_case = image_size __snake_case = depth_multiplier __snake_case = depth_divisible_by __snake_case = min_depth __snake_case = expand_ratio __snake_case = output_stride __snake_case = first_layer_is_expansion __snake_case = finegrained_output __snake_case = hidden_act __snake_case = tf_padding __snake_case = classifier_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = version.parse("""1.11""" ) @property def A ( self : Optional[int] ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def A ( self : Optional[int] ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def A ( self : int ): """simple docstring""" return 1e-4
680
1
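A minimal sketch of instantiating the configuration above via the public transformers API; the resulting model is randomly initialized, not pretrained:

from transformers import MobileNetV2Config, MobileNetV2Model

config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
model = MobileNetV2Model(config)  # randomly initialized backbone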
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a : List[Any] = logging.get_logger(__name__) a : Dict = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', 
'''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) a : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Optional[int]: for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: __snake_case = model_type_to_module_name(_UpperCAmelCase ) __snake_case = importlib.import_module(F'''.{module_name}''' , "transformers.models" ) try: return getattr(_UpperCAmelCase , _UpperCAmelCase ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_UpperCAmelCase , "__name__" , _UpperCAmelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __snake_case = importlib.import_module("transformers" ) if hasattr(_UpperCAmelCase , _UpperCAmelCase ): return getattr(_UpperCAmelCase , _UpperCAmelCase ) return None def __UpperCAmelCase ( _UpperCAmelCase : Union[str, os.PathLike] , _UpperCAmelCase : Optional[Union[str, os.PathLike]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[Dict[str, str]] = None , _UpperCAmelCase : Optional[Union[bool, str]] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Optional[Any] , ) -> Tuple: __snake_case = get_file_from_repo( _UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead." ) return {} with open(_UpperCAmelCase , encoding="utf-8" ) as reader: return json.load(_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] ): """simple docstring""" raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(a_ ) def A ( cls : int , a_ : Optional[Any] , **a_ : int ): """simple docstring""" __snake_case = kwargs.pop("config" , a_ ) __snake_case = kwargs.pop("trust_remote_code" , a_ ) __snake_case = True __snake_case , __snake_case = ImageProcessingMixin.get_image_processor_dict(a_ , **a_ ) __snake_case = config_dict.get("image_processor_type" , a_ ) __snake_case = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): __snake_case = config_dict["auto_map"]["AutoImageProcessor"] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: __snake_case = config_dict.pop("feature_extractor_type" , a_ ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. 
Loading" " based on pattern matching with the model's feature extractor configuration." ) __snake_case = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): __snake_case = config_dict["auto_map"]["AutoFeatureExtractor"] __snake_case = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." " Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(a_ , a_ ): __snake_case = AutoConfig.from_pretrained(a_ , **a_ ) # It could be in `config.image_processor_type`` __snake_case = getattr(a_ , "image_processor_type" , a_ ) if hasattr(a_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map: __snake_case = config.auto_map["AutoImageProcessor"] if image_processor_class is not None: __snake_case = image_processor_class_from_name(a_ ) __snake_case = image_processor_auto_map is not None __snake_case = image_processor_class is not None or type(a_ ) in IMAGE_PROCESSOR_MAPPING __snake_case = resolve_trust_remote_code( a_ , a_ , a_ , a_ ) if has_remote_code and trust_remote_code: __snake_case = get_class_from_dynamic_module( a_ , a_ , **a_ ) __snake_case = kwargs.pop("code_revision" , a_ ) if os.path.isdir(a_ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(a_ , **a_ ) elif image_processor_class is not None: return image_processor_class.from_dict(a_ , **a_ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(a_ ) in IMAGE_PROCESSOR_MAPPING: __snake_case = IMAGE_PROCESSOR_MAPPING[type(a_ )] return image_processor_class.from_dict(a_ , **a_ ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def A ( a_ : List[Any] , a_ : List[Any] ): """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(a_ , a_ )
680
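A short usage sketch of the factory above: from_pretrained resolves the right processor class from the checkpoint's config, and the register hook attaches a custom processor to a custom config class:

from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
# For custom classes: AutoImageProcessor.register(MyConfig, MyImageProcessor)  # names hypothetical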
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """data2vec-text""" def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @property def A ( self : Any ): """simple docstring""" if self.task == "multiple-choice": __snake_case = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
680
1
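A minimal instantiation sketch for the configuration above (public transformers names; the model weights are randomly initialized):

from transformers import Data2VecTextConfig, Data2VecTextModel

config = Data2VecTextConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
model = Data2VecTextModel(config)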
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''} a : Optional[int] = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } a : Tuple = { '''camembert-base''': 512, } a : Tuple = '''▁''' class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , a_ : Dict , a_ : str="<s>" , a_ : Dict="</s>" , a_ : Union[str, Any]="</s>" , a_ : int="<s>" , a_ : List[Any]="<unk>" , a_ : Optional[int]="<pad>" , a_ : Optional[int]="<mask>" , a_ : Dict=["<s>NOTUSED", "</s>NOTUSED"] , a_ : Optional[Dict[str, Any]] = None , **a_ : Union[str, Any] , ): """simple docstring""" __snake_case = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token __snake_case = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , ) __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a_ ) ) __snake_case = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __snake_case = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __snake_case = len(self.fairseq_tokens_to_ids ) __snake_case = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def A ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case = [self.cls_token_id] __snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) if token_ids_a is None: return [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1] def A ( self : Optional[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A ( self : int ): """simple docstring""" return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A ( self : str , a_ : str ): """simple docstring""" return self.sp_model.encode(a_ , out_type=a_ ) def A ( self : str , a_ : Union[str, 
Any] ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(a_ ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(a_ ) def A ( self : Optional[int] , a_ : Tuple ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A ( self : Union[str, Any] , a_ : Optional[Any] ): """simple docstring""" __snake_case = [] __snake_case = "" __snake_case = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a_ ) + token __snake_case = True __snake_case = [] else: current_sub_tokens.append(a_ ) __snake_case = False out_string += self.sp_model.decode(a_ ) return out_string.strip() def __getstate__( self : Dict ): """simple docstring""" __snake_case = self.__dict__.copy() __snake_case = None return state def __setstate__( self : Union[str, Any] , a_ : List[str] ): """simple docstring""" __snake_case = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case = {} __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self : str , a_ : str , a_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(a_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_ , "wb" ) as fi: __snake_case = self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,)
680
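A short usage sketch of the tokenizer above; the fairseq-offset bookkeeping it implements is exercised transparently through the standard encode path:

from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
enc = tokenizer("J'aime le camembert !")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))  # sentencepiece tokens wrapped in <s> ... </s>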
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a : Tuple = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ): """simple docstring""" __snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] ) __snake_case = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , a_ : int ): """simple docstring""" super().__init__(a_ ) __snake_case = BertEncoderWithPabee(a_ ) self.init_weights() __snake_case = 0 __snake_case = 0 __snake_case = 0 __snake_case = 0 def A ( self : Optional[int] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = threshold def A ( self : Optional[Any] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = patience def A ( self : Any ): """simple docstring""" __snake_case = 0 __snake_case = 0 def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.inference_layers_num / self.inference_instances_num __snake_case = ( f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(a_ ) @add_start_docstrings_to_model_forward(a_ ) def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __snake_case = input_ids.size() elif inputs_embeds is not None: __snake_case = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) if token_type_ids is None: __snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __snake_case , __snake_case , __snake_case = encoder_hidden_states.size() __snake_case = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) __snake_case = self.invert_attention_mask(a_ ) else: __snake_case = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers ) __snake_case = self.embeddings( input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ ) __snake_case = embedding_output if self.training: __snake_case = [] for i in range(self.config.num_hidden_layers ): __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](output_dropout(a_ ) ) res.append(a_ ) elif self.patience == 0: # Use all layers for inference __snake_case = self.encoder( a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case = self.pooler(encoder_outputs[0] ) __snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )] else: __snake_case = 0 __snake_case = None __snake_case = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](a_ ) if regression: __snake_case = logits.detach() if patient_result is not None: __snake_case = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __snake_case = 0 else: __snake_case = logits.detach().argmax(dim=1 ) if patient_result is not None: __snake_case = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(a_ ) ): patient_counter += 1 else: __snake_case = 0 __snake_case = logits if patient_counter == self.patience: break __snake_case = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : List[str] , a_ : Tuple ): """simple docstring""" super().__init__(a_ ) __snake_case = config.num_labels __snake_case = BertModelWithPabee(a_ ) __snake_case = nn.Dropout(config.hidden_dropout_prob ) __snake_case = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(a_ ) def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ): """simple docstring""" __snake_case = self.bert( input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __snake_case = (logits[-1],) if labels is not None: __snake_case = None __snake_case = 0 for ix, logits_item in enumerate(a_ ): if self.num_labels == 1: # We are doing regression __snake_case = MSELoss() __snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __snake_case = CrossEntropyLoss() __snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __snake_case = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __snake_case = (total_loss / total_weights,) + outputs return outputs
680
1
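The early-exit rule implemented above can be summarized in a few lines. This is a conceptual sketch, not the repo's API: inference stops once `patience` consecutive classifier heads agree on the argmax prediction (per_layer_logits is a hypothetical list of one logits tensor per exit head):

import torch

patience, counter, prev_pred = 3, 0, None
for layer_logits in per_layer_logits:  # hypothetical iterable of per-layer logits
    pred = layer_logits.argmax(dim=1)
    counter = counter + 1 if (prev_pred is not None and torch.equal(pred, prev_pred)) else 0
    prev_pred = pred
    if counter == patience:
        break  # the last `patience` layers agreed: exit early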
'''simple docstring''' import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor a : Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , *a_ : str , **a_ : Optional[int] ): """simple docstring""" warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , a_ , ) super().__init__(*a_ , **a_ )
680
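The shim above only warns and delegates; the forward-compatible call it recommends is simply:

from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")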
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = backbone_out_indices __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = initializer_range __snake_case = num_labels __snake_case = backbone_featmap_shape __snake_case = scope __snake_case = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __snake_case = (image_size // patch_size) ** 2 __snake_case = num_patches + 1 def A ( self : int ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def A ( self : Optional[Any] ): """simple docstring""" __snake_case = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ): """simple docstring""" __snake_case = DPTModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForDepthEstimation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def A ( self : List[Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( { """depth-estimation""": DPTForDepthEstimation, """feature-extraction""": DPTModel, """image-segmentation""": DPTForSemanticSegmentation, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : Optional[Any] ): """simple docstring""" __snake_case = DPTModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def A ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds" ) def A ( self : Any ): """simple docstring""" pass def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def A ( self : List[str] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) def A ( self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def A ( self : Optional[int] ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True if model_class in get_values(a_ ): continue __snake_case = model_class(a_ ) model.to(a_ ) model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : int ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = False __snake_case = True if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue __snake_case = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = _config_zero_init(a_ ) for model_class in self.all_model_classes: __snake_case = model_class(config=a_ ) # Skip the check for the backbone __snake_case = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : Tuple ): """simple docstring""" pass @slow def A ( self : int ): """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __snake_case = DPTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def A ( self : int ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = "add" with self.assertRaises(a_ ): __snake_case = DPTForDepthEstimation(a_ ) def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Dict ): """simple docstring""" __snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" ) __snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ ) __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.predicted_depth # verify the predicted depth __snake_case = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , a_ ) __snake_case = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
680
1
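A condensed version of the slow integration test above, as a standalone inference sketch (example.jpg is a hypothetical local image):

import torch
from PIL import Image
from transformers import DPTImageProcessor, DPTForDepthEstimation

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
with torch.no_grad():
    depth = model(**inputs).predicted_depth  # (1, 384, 384) for this checkpoint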
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> List[str]: # Initialise PyTorch model __snake_case = BertConfig.from_json_file(_UpperCAmelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) __snake_case = BertForPreTraining(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _UpperCAmelCase ) if __name__ == "__main__": a : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) a : Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
680
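The script above is driven from the command line; assuming it is saved under its usual transformers filename, an invocation looks like:

python convert_bert_original_tf_checkpoint_to_pytorch.py \
    --tf_checkpoint_path ./bert_model.ckpt \
    --bert_config_file ./bert_config.json \
    --pytorch_dump_path ./pytorch_model.bin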
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def A ( self : Any ): """simple docstring""" return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
680
1
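The copy helper above follows the usual rebuild-from-deep-copied-fields pattern; in un-obfuscated form it reads roughly like this (note that each value v should be deep-copied, which the obfuscated dump obscures):

import copy

def clone(cfg):
    # rebuild the dataclass from deep copies of its current field values
    return cfg.__class__(**{k: copy.deepcopy(v) for k, v in cfg.__dict__.items()})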
'''simple docstring''' from manim import * class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Any ): """simple docstring""" __snake_case = Rectangle(height=0.5 , width=0.5 ) __snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __snake_case = [mem.copy() for i in range(6 )] __snake_case = [mem.copy() for i in range(6 )] __snake_case = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case = VGroup(a_ , a_ ).arrange(a_ , buff=0 ) __snake_case = Text("CPU" , font_size=24 ) __snake_case = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(a_ ) __snake_case = [mem.copy() for i in range(4 )] __snake_case = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case = Text("GPU" , font_size=24 ) __snake_case = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) gpu.move_to([-1, -1, 0] ) self.add(a_ ) __snake_case = [mem.copy() for i in range(6 )] __snake_case = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case = Text("Model" , font_size=24 ) __snake_case = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) model.move_to([3, -1.0, 0] ) self.add(a_ ) __snake_case = [] for i, rect in enumerate(a_ ): rect.set_stroke(a_ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) __snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=a_ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=a_ , buff=0.0 ) self.add(a_ ) cpu_targs.append(a_ ) __snake_case = [mem.copy() for i in range(6 )] __snake_case = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case = Text("Loaded Checkpoint" , font_size=24 ) __snake_case = Group(a_ , a_ ).arrange(a_ , aligned_edge=a_ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) __snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __snake_case = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(a_ , a_ ) __snake_case = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) __snake_case = MarkupText( f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(a_ ) , Write(a_ ) ) self.play(Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) ) __snake_case = [] __snake_case = [] for i, rect in enumerate(a_ ): __snake_case = fill.copy().set_fill(a_ , opacity=0.7 ) target.move_to(a_ ) first_animations.append(GrowFromCenter(a_ , run_time=1 ) ) __snake_case = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(a_ , run_time=1.5 ) ) self.play(*a_ ) self.play(*a_ ) self.wait()
680
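Scenes like the one above are rendered from the manim CLI; with the scene class saved in a file (file and class names here are hypothetical):

manim -pql checkpoint_loading.py LoadShardScene  # -p preview, -ql low quality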
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device a : Optional[Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = generator.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def A ( self : Optional[int] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __snake_case = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
680
1
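A standalone sketch distilled from the pipeline test above (GPU plus fp16, as in the slow test):

import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
pipe.remove_unused_weights()  # drops the unused text_unet, as in the test
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]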
'''simple docstring''' import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Any=None , ) -> List[Any]: if attention_mask is None: __snake_case = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __snake_case = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __snake_case = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_UpperCAmelCase ) if decoder_head_mask is None: __snake_case = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_UpperCAmelCase ) if cross_attn_head_mask is None: __snake_case = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , a_ : int , a_ : Union[str, Any]=13 , a_ : Optional[int]=7 , a_ : Tuple=True , a_ : str=False , a_ : Any=99 , a_ : List[Any]=16 , a_ : Optional[Any]=2 , a_ : Optional[Any]=4 , a_ : List[Any]=4 , a_ : Tuple="relu" , a_ : List[Any]=0.1 , a_ : List[Any]=0.1 , a_ : Any=0.0 , a_ : Optional[int]=0.0 , a_ : int=20 , a_ : List[Any]=2 , a_ : str=1 , a_ : Union[str, Any]=0 , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = encoder_layerdrop __snake_case = decoder_layerdrop __snake_case = max_position_embeddings __snake_case = eos_token_id __snake_case = pad_token_id __snake_case = bos_token_id def A ( self : Any ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = self.eos_token_id # Eos Token __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth 
and which in turn results in # position_ids being off by num_pad_tokens in past input __snake_case = input_ids.clamp(self.pad_token_id + 1 ) __snake_case = decoder_input_ids.clamp(self.pad_token_id + 1 ) __snake_case = self.get_config() __snake_case = prepare_mam_aaa_inputs_dict(a_ , a_ , a_ ) return config, inputs_dict def A ( self : List[Any] ): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def A ( self : int ): """simple docstring""" __snake_case , __snake_case = self.prepare_config_and_inputs() return config, inputs_dict def A ( self : List[str] , a_ : int , a_ : Union[str, Any] ): """simple docstring""" __snake_case = MaMaaaModel(config=a_ ).get_decoder().to(a_ ).eval() __snake_case = inputs_dict["input_ids"] __snake_case = inputs_dict["attention_mask"] __snake_case = inputs_dict["head_mask"] # first forward pass __snake_case = model(a_ , attention_mask=a_ , head_mask=a_ , use_cache=a_ ) __snake_case , __snake_case = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __snake_case = model(a_ , attention_mask=a_ )["last_hidden_state"] __snake_case = model(a_ , attention_mask=a_ , past_key_values=a_ )[ "last_hidden_state" ] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-2 ) ) def A ( self : Tuple , a_ : List[str] , a_ : int ): """simple docstring""" __snake_case = MaMaaaModel(config=a_ ).to(a_ ).eval() __snake_case = model(**a_ ) __snake_case = outputs.encoder_last_hidden_state __snake_case = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __snake_case = model.get_encoder() encoder.save_pretrained(a_ ) __snake_case = MaMaaaEncoder.from_pretrained(a_ ).to(a_ ) __snake_case = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: __snake_case = model.get_decoder() decoder.save_pretrained(a_ ) __snake_case = MaMaaaDecoder.from_pretrained(a_ ).to(a_ ) __snake_case = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=a_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] 
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = (MaMaaaForConditionalGeneration,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : List[str] , a_ : Union[str, Any] , a_ : str , a_ : str , a_ : Union[str, Any] , a_ : Optional[Any] ): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def A ( self : str ): """simple docstring""" __snake_case = MaMaaaModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ ) def A ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a_ ) __snake_case , __snake_case = model_class.from_pretrained(a_ , output_loading_info=a_ ) self.assertEqual(info["missing_keys"] , [] ) def A ( self : str ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*a_ ) def A ( self : Any ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*a_ ) def A ( self : List[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): __snake_case = model_class(a_ ) model.to(a_ ) model.eval() __snake_case = copy.deepcopy(self._prepare_for_class(a_ , a_ ) ) if not self.is_encoder_decoder: __snake_case = inputs["input_ids"] del inputs["input_ids"] else: __snake_case = inputs["input_ids"] __snake_case = inputs.get("decoder_input_ids" , a_ ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , a_ ) __snake_case = model.get_input_embeddings() if not self.is_encoder_decoder: __snake_case = wte(a_ ) else: __snake_case = wte(a_ ) __snake_case = wte(a_ ) with torch.no_grad(): model(**a_ )[0] def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs() __snake_case = input_dict["input_ids"] __snake_case = input_ids.ne(1 ).to(a_ ) __snake_case = MaMaaaForConditionalGeneration(a_ ).eval().to(a_ ) if torch_device == "cuda": model.half() model.generate(a_ , attention_mask=a_ ) model.generate(num_beams=4 , do_sample=a_ , early_stopping=a_ , num_return_sequences=3 ) def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> List[str]: return 
torch.tensor(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ) a : int = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : List[Any] ): """simple docstring""" return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def A ( self : Tuple ): """simple docstring""" __snake_case = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(a_ ) __snake_case = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) __snake_case = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) __snake_case = prepare_mam_aaa_inputs_dict(model.config , a_ , a_ ) with torch.no_grad(): __snake_case = model(**a_ )[0] __snake_case = torch.Size((1, 11, 1_024) ) self.assertEqual(output.shape , a_ ) # change to expected output here __snake_case = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=a_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=a_ ) ) def A ( self : List[Any] ): """simple docstring""" __snake_case = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a_ ) # change to intended input __snake_case = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) __snake_case = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) __snake_case = prepare_mam_aaa_inputs_dict(model.config , a_ , a_ ) with torch.no_grad(): __snake_case = model(**a_ )[0] __snake_case = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , a_ ) # change to expected output here __snake_case = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=a_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=a_ ) ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(a_ ) __snake_case = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) __snake_case = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams __snake_case = tokenizer(a_ , padding=a_ , return_tensors="pt" ) __snake_case = model.generate( input_ids=dct["input_ids"].to(a_ ) , attention_mask=dct["attention_mask"].to(a_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) __snake_case = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] __snake_case = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=a_ , skip_special_tokens=a_ ) assert generated == expected_en
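A minimal translation sketch distilled from the slow tests above; it reuses this file's aliased class names (MaMaaaTokenizer / MaMaaaForConditionalGeneration, which stand in for the M2M100 classes) and the same "facebook/m2m100_418M" checkpoint, so treat it as a usage illustration rather than canonical API documentation.

from transformers import MaMaaaForConditionalGeneration, MaMaaaTokenizer

tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer(["L'affaire NSA souligne l'absence totale de débat sur le renseignement"], return_tensors="pt")
# force the first generated token to be the English language token, as in the beam-search test above
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))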
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : Any = get_logger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving model to {ckpt_dir}''' ) __snake_case = {"model": state_dict} dist_cp.save_state_dict( state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: 
__snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = ( os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) __snake_case = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , ) __snake_case = state_dict["model"] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: __snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: __snake_case = ( os.path.join(_UpperCAmelCase , 
F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) __snake_case = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , ) __snake_case = optim_state["optimizer"] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) __snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) optimizer.load_state_dict(_UpperCAmelCase )
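A hedged call-site sketch for the four helpers above. The names save_fsdp_model / load_fsdp_model / save_fsdp_optimizer / load_fsdp_optimizer and the argument order (fsdp_plugin, accelerator, model-or-optimizer, directory, index) are assumptions inferred from the mangled signatures; verify against accelerate.utils.fsdp_utils before relying on them.

fsdp_plugin = accelerator.state.fsdp_plugin  # carries state_dict_type and the two state-dict configs used above

save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0)

# on resume, the same plugin settings decide whether a single .bin, per-rank .bin files,
# or a sharded checkpoint directory is read back
load_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0)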
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = TextToVideoSDPipeline __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __SCREAMING_SNAKE_CASE = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def A ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) __snake_case = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , ) torch.manual_seed(0 ) __snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , ) __snake_case = CLIPTextModel(a_ ) __snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __snake_case = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def A ( self : Union[str, Any] , a_ : str , a_ : Optional[int]=0 ): """simple docstring""" if str(a_ ).startswith("mps" ): __snake_case = torch.manual_seed(a_ ) else: __snake_case = torch.Generator(device=a_ ).manual_seed(a_ ) __snake_case = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def A ( self : Any ): """simple docstring""" __snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case = self.get_dummy_components() __snake_case = TextToVideoSDPipeline(**a_ ) __snake_case = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) __snake_case = self.get_dummy_inputs(a_ ) __snake_case = "np" __snake_case = sd_pipe(**a_ ).frames __snake_case = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __snake_case = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A ( self : str ): """simple docstring""" 
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a_ , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def A ( self : Tuple ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ , expected_max_diff=1e-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def A ( self : int ): """simple docstring""" pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def A ( self : int ): """simple docstring""" pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def A ( self : Optional[int] ): """simple docstring""" pass def A ( self : List[Any] ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : List[Any] ): """simple docstring""" __snake_case = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) __snake_case = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __snake_case = pipe.to("cuda" ) __snake_case = "Spiderman is surfing" __snake_case = torch.Generator(device="cpu" ).manual_seed(0 ) __snake_case = pipe(a_ , generator=a_ , num_inference_steps=25 , output_type="pt" ).frames __snake_case = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def A ( self : List[Any] ): """simple docstring""" __snake_case = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) __snake_case = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) __snake_case = pipe.to("cuda" ) __snake_case = "Spiderman is surfing" __snake_case = torch.Generator(device="cpu" ).manual_seed(0 ) __snake_case = pipe(a_ , generator=a_ , num_inference_steps=2 , output_type="pt" ).frames __snake_case = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
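The slow tests above boil down to a short usage pattern; this sketch keeps only the calls that appear in them (from_pretrained, the scheduler swap, seeded generation) and assumes a CUDA device is available.

import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)  # optional faster scheduler
pipe = pipe.to("cuda")

generator = torch.Generator(device="cpu").manual_seed(0)  # CPU seeding keeps results device-independent
frames = pipe("Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt").frames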
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("iterations must be defined as integers" ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1: raise ValueError( "starting number must be an integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) __snake_case = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(_UpperCAmelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
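A quick sanity check. The mangled signature reuses one parameter name and is therefore not valid Python as written; the restored signature fizz_buzz(number, iterations) is assumed below. Note that every emitted token carries a trailing space.

assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "
assert fizz_buzz(1, 15).endswith("FizzBuzz ")  # 15 is divisible by both 3 and 5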
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : str , a_ : list[int] ): """simple docstring""" __snake_case = len(a_ ) __snake_case = [0] * len_array if len_array > 0: __snake_case = array[0] for i in range(1 , a_ ): __snake_case = self.prefix_sum[i - 1] + array[i] def A ( self : int , a_ : int , a_ : int ): """simple docstring""" if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def A ( self : Tuple , a_ : int ): """simple docstring""" __snake_case = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(a_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
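A short worked example. The mangling gives both methods the name A (the second definition would shadow the first), so the restored names get_sum(start, end) and contains_sum(target_sum) are assumed; get_sum is O(1) after the O(n) constructor, and contains_sum asks whether any contiguous subarray hits the target.

ps = PrefixSum([1, 2, 3])            # prefix sums: [1, 3, 6]; class name assumed restored
assert ps.get_sum(0, 2) == 6         # whole array
assert ps.get_sum(1, 2) == 5         # subarray [2, 3]
assert ps.contains_sum(5) is True    # [2, 3] sums to 5
assert ps.contains_sum(7) is False   # no contiguous subarray reaches 7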
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str: if number > 0: raise ValueError("input must be a negative integer" ) __snake_case = len(bin(_UpperCAmelCase )[3:] ) __snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:] __snake_case = ( ( "1" + "0" * (binary_number_length - len(_UpperCAmelCase )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
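Worked values, assuming the restored name twos_complement(number): -5 needs three magnitude bits plus the sign bit, and 0 slips past the `number > 0` guard despite the error message demanding a strictly negative input.

assert twos_complement(-5) == "0b1011"   # 4-bit two's complement of -5
assert twos_complement(-1) == "0b11"     # 2-bit two's complement of -1
assert twos_complement(0) == "0b0"       # edge case: 0 is accepted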
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : Optional[Any] , a_ : Any ): """simple docstring""" __snake_case = data def __iter__( self : int ): """simple docstring""" for element in self.data: yield element def __UpperCAmelCase ( _UpperCAmelCase : Dict=True ) -> List[Any]: __snake_case = Accelerator(even_batches=_UpperCAmelCase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __UpperCAmelCase ( _UpperCAmelCase : Accelerator , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : bool = False ) -> Optional[Any]: if iterable: __snake_case = DummyIterableDataset(torch.as_tensor(range(_UpperCAmelCase ) ) ) else: __snake_case = TensorDataset(torch.as_tensor(range(_UpperCAmelCase ) ) ) __snake_case = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase ) __snake_case = accelerator.prepare(_UpperCAmelCase ) return dl def __UpperCAmelCase ( _UpperCAmelCase : Accelerator , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , ) -> Optional[int]: __snake_case = create_dataloader(accelerator=_UpperCAmelCase , dataset_size=_UpperCAmelCase , batch_size=_UpperCAmelCase ) __snake_case = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __UpperCAmelCase ( ) -> List[Any]: __snake_case = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( _UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( _UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def __UpperCAmelCase ( ) -> Tuple: __snake_case = create_accelerator(even_batches=_UpperCAmelCase ) verify_dataloader_batch_sizes( _UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( _UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = create_accelerator(even_batches=_UpperCAmelCase ) __snake_case = torch.nn.Linear(1 , 1 ) __snake_case = accelerator.prepare(_UpperCAmelCase ) __snake_case = 
create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 ) __snake_case = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(_UpperCAmelCase ): __snake_case = ddp_model(batch[0].float() ) __snake_case = output.sum() loss.backward() batch_idxs.append(_UpperCAmelCase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> List[str]: with warnings.catch_warnings(record=_UpperCAmelCase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , _UpperCAmelCase ) assert "only supported for multi-GPU" in str(w[-1].message ) def __UpperCAmelCase ( ) -> List[str]: __snake_case = True __snake_case = False __snake_case = create_accelerator(even_batches=_UpperCAmelCase ) __snake_case = torch.nn.Linear(1 , 1 ) __snake_case = accelerator.prepare(_UpperCAmelCase ) __snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 ) __snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ): __snake_case = train_dl.batch_sampler.even_batches __snake_case = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __UpperCAmelCase ( ) -> List[str]: __snake_case = True __snake_case = False __snake_case = create_accelerator(even_batches=_UpperCAmelCase ) __snake_case = torch.nn.Linear(1 , 1 ) __snake_case = accelerator.prepare(_UpperCAmelCase ) create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCAmelCase ) __snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ): __snake_case = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = create_accelerator() __snake_case = torch.nn.Linear(1 , 1 ) __snake_case = accelerator.prepare(_UpperCAmelCase ) create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCAmelCase ) with warnings.catch_warnings(record=_UpperCAmelCase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ): pass assert issubclass(w[-1].category , _UpperCAmelCase ) assert "only supported for map-style datasets" in str(w[-1].message ) def __UpperCAmelCase ( ) -> Dict: __snake_case = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed 
dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) __snake_case = accelerator.state.distributed_type __snake_case = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(_UpperCAmelCase ) __snake_case = original_state if __name__ == "__main__": main()
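The core pattern these tests exercise, as a standalone sketch; it assumes two processes launched with `accelerate launch` and uses only APIs already imported in this script.

accelerator = Accelerator(even_batches=False)
model = accelerator.prepare(torch.nn.Linear(1, 1))
dl = accelerator.prepare(DataLoader(TensorDataset(torch.arange(3.0).unsqueeze(1)), batch_size=1))

# without the join context, the rank that runs out of batches first would hang the other in backward()
with accelerator.join_uneven_inputs([model]):
    for (batch,) in dl:
        loss = model(batch).sum()
        loss.backward()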
'''simple docstring''' from timeit import timeit def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int: if number < 0: raise ValueError("the value of input must not be negative" ) __snake_case = 0 while number: number &= number - 1 result += 1 return result def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int: if number < 0: raise ValueError("the value of input must not be negative" ) __snake_case = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def __UpperCAmelCase ( ) -> None: def do_benchmark(_UpperCAmelCase : int ) -> None: __snake_case = "import __main__ as z" print(F'''Benchmark when {number = }:''' ) print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' ) __snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase ) print(F'''timeit() runs in {timing} seconds''' ) print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' ) __snake_case = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , ) print(F'''timeit() runs in {timing} seconds''' ) for number in (25, 37, 58, 0): do_benchmark(_UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
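Both counters agree on any non-negative input; a quick check under the original names, which the timeit setup strings above still reference (the mangled defs share one name, so the second shadows the first as written).

assert get_set_bits_count_using_modulo_operator(25) == 3               # 25 == 0b11001
assert get_set_bits_count_using_brian_kernighans_algorithm(37) == 3    # 37 == 0b100101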
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging a : int = logging.get_logger(__name__) a : Any = '''▁''' a : List[Any] = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a : Tuple = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } a : Any = { '''facebook/m2m100_418M''': 1_024, } # fmt: off a : Any = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self : Tuple , a_ : List[Any] , a_ : Dict , a_ : List[str]=None , a_ : List[Any]=None , a_ : Union[str, Any]="<s>" , a_ : Tuple="</s>" , a_ : str="</s>" , a_ : Union[str, Any]="<pad>" , a_ : int="<unk>" , a_ : str="m2m100" , a_ : Optional[Dict[str, Any]] = None , a_ : Tuple=8 , **a_ : Union[str, Any] , ): """simple docstring""" __snake_case = {} if sp_model_kwargs is None else sp_model_kwargs __snake_case = language_codes __snake_case = FAIRSEQ_LANGUAGE_CODES[language_codes] __snake_case = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code} __snake_case = kwargs.get("additional_special_tokens" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(a_ ) for lang_code in fairseq_language_code if self.get_lang_token(a_ ) not 
in kwargs["additional_special_tokens"] ] super().__init__( src_lang=a_ , tgt_lang=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , language_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a_ , **a_ , ) __snake_case = vocab_file __snake_case = load_json(a_ ) __snake_case = {v: k for k, v in self.encoder.items()} __snake_case = spm_file __snake_case = load_spm(a_ , self.sp_model_kwargs ) __snake_case = len(self.encoder ) __snake_case = { self.get_lang_token(a_ ): self.encoder_size + i for i, lang_code in enumerate(a_ ) } __snake_case = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a_ )} __snake_case = {v: k for k, v in self.lang_token_to_id.items()} __snake_case = src_lang if src_lang is not None else "en" __snake_case = tgt_lang __snake_case = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) __snake_case = num_madeup_words @property def A ( self : Any ): """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def A ( self : Optional[Any] ): """simple docstring""" return self._src_lang @src_lang.setter def A ( self : Dict , a_ : str ): """simple docstring""" __snake_case = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A ( self : Any , a_ : str ): """simple docstring""" return self.sp_model.encode(a_ , out_type=a_ ) def A ( self : Union[str, Any] , a_ : int ): """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(a_ , self.encoder[self.unk_token] ) def A ( self : Dict , a_ : int ): """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(a_ , self.unk_token ) def A ( self : Tuple , a_ : str ): """simple docstring""" __snake_case = [] __snake_case = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(a_ ) + token __snake_case = [] else: current_sub_tokens.append(a_ ) out_string += self.sp_model.decode(a_ ) return out_string.strip() def A ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) __snake_case = [1] * len(self.prefix_tokens ) __snake_case = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(a_ )) + suffix_ones return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones def A ( self : Any , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : Any ): """simple docstring""" __snake_case = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): """simple docstring""" __snake_case = self.__dict__.copy() __snake_case = None return state def __setstate__( self : Optional[Any] , a_ : Dict ): """simple docstring""" __snake_case = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case = {} __snake_case = load_spm(self.spm_file , self.sp_model_kwargs ) def A 
( self : Optional[Any] , a_ : str , a_ : Optional[str] = None ): """simple docstring""" __snake_case = Path(a_ ) if not save_dir.is_dir(): raise OSError(f'''{save_directory} should be a directory''' ) __snake_case = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) __snake_case = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder , a_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , a_ ) elif not os.path.isfile(self.spm_file ): with open(a_ , "wb" ) as fi: __snake_case = self.sp_model.serialized_model_proto() fi.write(a_ ) return (str(a_ ), str(a_ )) def A ( self : Tuple , a_ : List[str] , a_ : str = "en" , a_ : Optional[List[str]] = None , a_ : str = "ro" , **a_ : List[str] , ): """simple docstring""" __snake_case = src_lang __snake_case = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def A ( self : Any , a_ : Dict , a_ : Optional[str] , a_ : Optional[str] , **a_ : Optional[Any] ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __snake_case = src_lang __snake_case = self(a_ , add_special_tokens=a_ , **a_ ) __snake_case = self.get_lang_id(a_ ) __snake_case = tgt_lang_id return inputs def A ( self : str ): """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def A ( self : List[str] ): """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : Optional[int] , a_ : str ): """simple docstring""" __snake_case = self.get_lang_token(a_ ) __snake_case = self.lang_token_to_id[lang_token] __snake_case = [self.cur_lang_id] __snake_case = [self.eos_token_id] def A ( self : List[str] , a_ : str ): """simple docstring""" __snake_case = self.get_lang_token(a_ ) __snake_case = self.lang_token_to_id[lang_token] __snake_case = [self.cur_lang_id] __snake_case = [self.eos_token_id] def A ( self : List[str] , a_ : str ): """simple docstring""" return self.lang_code_to_token[lang] def A ( self : List[Any] , a_ : str ): """simple docstring""" __snake_case = self.get_lang_token(a_ ) return self.lang_token_to_id[lang_token] def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: __snake_case = sentencepiece.SentencePieceProcessor(**_UpperCAmelCase ) spm.Load(str(_UpperCAmelCase ) ) return spm def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Union[Dict, List]: with open(_UpperCAmelCase , "r" ) as f: return json.load(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> None: with open(_UpperCAmelCase , "w" ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=2 )
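A short sketch of the language-token workflow this tokenizer implements, using only methods defined above; the class alias follows the MaMaaaTokenizer name used by the tests earlier in this document, and the checkpoint matches theirs.

tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
enc = tok("La vie est belle.", return_tensors="pt")   # ids are [__fr__, ..., </s>] via set_src_lang_special_tokens
forced_bos = tok.get_lang_id("en")                    # __en__ id, passed to generate(forced_bos_token_id=...)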
'''simple docstring''' import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch a : Dict = '''sshleifer/bart-tiny-random''' a : str = '''patrickvonplaten/t5-tiny-random''' @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : Union[str, Any] ): """simple docstring""" return AutoConfig.from_pretrained(a_ ) def A ( self : str ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) def A ( self : Dict ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def A ( self : Optional[int] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def A ( self : Dict ): """simple docstring""" with self.assertRaises(a_ ): create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
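The helper under test copies alternating teacher layers into a smaller student; a hedged sketch of the call shape using the tiny checkpoint named at the top of the file. The starred unpacking mirrors the tests: the helper also returns the indices of the copied encoder/decoder layers.

import tempfile
from make_student import create_student_by_copying_alternating_layers

student, *copied_layer_ids = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1  # 1 encoder layer, 1 decoder layer
)
assert student.config.encoder_layers == 1 and student.config.decoder_layers == 1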
'''simple docstring''' from collections import Counter from timeit import timeit def __UpperCAmelCase ( _UpperCAmelCase : str = "" , ) -> bool: return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def __UpperCAmelCase ( _UpperCAmelCase : str = "" ) -> bool: if len(_UpperCAmelCase ) == 0: return True __snake_case = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __snake_case = {} for character in lower_case_input_str: __snake_case = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1 __snake_case = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def __UpperCAmelCase ( _UpperCAmelCase : str = "" ) -> None: print("\nFor string = " , _UpperCAmelCase , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": a : Dict = input( '''Enter string to determine if it can be rearranged as a palindrome or not: ''' ).strip() benchmark(check_str) a : Tuple = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
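Two quick cases. The defs are mangled while the timeit setup strings still reference the original names (so the benchmark as written would raise NameError); the originals are assumed below. The invariant: a string can be rearranged into a palindrome iff at most one character has an odd count, ignoring spaces and case.

assert can_string_be_rearranged_as_palindrome_counter("Momo") is True   # m=2, o=2
assert can_string_be_rearranged_as_palindrome("race car") is True       # r=2, a=2, c=2, e=1
assert can_string_be_rearranged_as_palindrome_counter("abc") is False   # three odd counts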
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors a : Any = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """sequence-classification""" def __init__( self : List[str] , a_ : str ): """simple docstring""" if type(a_ ) == dict: __snake_case = Namespace(**a_ ) __snake_case = glue_output_modes[hparams.task] __snake_case = glue_tasks_num_labels[hparams.task] super().__init__(a_ , a_ , self.mode ) def A ( self : Union[str, Any] , **a_ : List[Any] ): """simple docstring""" return self.model(**a_ ) def A ( self : int , a_ : Optional[Any] , a_ : int ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __snake_case = self(**a_ ) __snake_case = outputs[0] __snake_case = self.trainer.lr_schedulers[0]["scheduler"] __snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def A ( self : List[str] ): """simple docstring""" __snake_case = self.hparams __snake_case = processors[args.task]() __snake_case = processor.get_labels() for mode in ["train", "dev"]: __snake_case = self._feature_file(a_ ) if os.path.exists(a_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , a_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) __snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) __snake_case = convert_examples_to_features( a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , a_ ) torch.save(a_ , a_ ) def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ): """simple docstring""" __snake_case = "dev" if mode == "test" else mode __snake_case = self._feature_file(a_ ) logger.info("Loading features from cached file %s" , a_ ) __snake_case = torch.load(a_ ) __snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , ) def A ( self : int , a_ : List[str] , a_ : Tuple ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", 
"albert"] else None __snake_case = self(**a_ ) __snake_case , __snake_case = outputs[:2] __snake_case = logits.detach().cpu().numpy() __snake_case = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A ( self : Dict , a_ : Optional[int] ): """simple docstring""" __snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() __snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __snake_case = np.argmax(a_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": __snake_case = np.squeeze(a_ ) __snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 ) __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )} __snake_case = dict(results.items() ) __snake_case = results return ret, preds_list, out_label_list def A ( self : Tuple , a_ : list ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A ( self : int , a_ : Tuple ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A ( a_ : str , a_ : Any ): """simple docstring""" BaseTransformer.add_model_specific_args(a_ , a_ ) parser.add_argument( "--max_seq_length" , default=128 , type=a_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = argparse.ArgumentParser() add_generic_args(_UpperCAmelCase , os.getcwd() ) __snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) __snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __snake_case = os.path.join( "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __snake_case = GLUETransformer(_UpperCAmelCase ) __snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) ) __snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCAmelCase ) if __name__ == "__main__": main()
'''simple docstring''' from __future__ import annotations a : List[Any] = 1.6021e-19 # units = C def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , ) -> tuple[str, float]: if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("Exactly one of the three values must be 0 (the unknown to solve for)" ) elif conductivity < 0: raise ValueError("Conductivity cannot be negative" ) elif electron_conc < 0: raise ValueError("Electron concentration cannot be negative" ) elif mobility < 0: raise ValueError("Mobility cannot be negative" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
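A worked example under an assumed restored name such as electric_conductivity(conductivity, electron_conc, mobility): pass 0 for the unknown quantity and the function returns its name and value from σ = q·n·μ.

name, value = electric_conductivity(0, 1e20, 0.05)       # hypothetical restored name
assert name == "conductivity"
assert abs(value - 0.05 * 1e20 * 1.6021e-19) < 1e-9      # ≈ 0.80105 S/m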
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] ) @pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase ) __snake_case = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: __snake_case = dataset_size < in_memory_max_size else: __snake_case = False __snake_case = is_small_dataset(_UpperCAmelCase ) assert result == expected
680
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : str = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
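`_LazyModule` defers the heavy torch imports until a symbol is first touched. A rough sketch of the same idea using PEP 562 module-level `__getattr__` (a simplified stand-in, not the actual `_LazyModule` implementation; the mapping assumes the package layout shown above):

import importlib

# Public name -> submodule that defines it.
_import_structure = {"GPTBigCodeConfig": "configuration_gpt_bigcode"}

def __getattr__(name):  # lives in the package's __init__.py
    # The submodule is imported only when the attribute is actually requested.
    if name in _import_structure:
        module = importlib.import_module("." + _import_structure[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")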
680
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float: if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Length must be positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float: if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Length must be positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
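Those two returns encode the closed forms for a regular dodecahedron: surface area A = 3 * sqrt(25 + 10 * sqrt(5)) * a^2 and volume V = (15 + 7 * sqrt(5)) / 4 * a^3. A worked check for edge length 2:

import math

edge = 2.0
surface_area = 3 * math.sqrt(25 + 10 * math.sqrt(5)) * edge**2
volume = (15 + 7 * math.sqrt(5)) / 4 * edge**3

print(round(surface_area, 4))  # 82.5829
print(round(volume, 4))        # 61.305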
680
1
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets a : Union[str, Any] = datasets.logging.get_logger(__name__) a : Optional[Any] = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' a : Any = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' a : Tuple = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def A ( self : str ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def A ( self : int , a_ : str ): """simple docstring""" if self.config_name == "default": __snake_case = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: __snake_case = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def A ( self : List[str] , a_ : str , a_ : List[Any] , a_ : str , a_ : int=None , a_ : Union[str, Any]=False ): """simple docstring""" if gpus is None: __snake_case = 1 if torch.cuda.is_available() else 0 __snake_case = {"src": sources, "mt": predictions, "ref": references} __snake_case = [dict(zip(a_ , a_ ) ) for t in zip(*data.values() )] __snake_case , __snake_case = self.scorer.predict(a_ , gpus=a_ , progress_bar=a_ ) return {"mean_score": mean_score, "scores": scores}
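Internally the `compute` method converts its column-wise inputs into the row-wise records the COMET scorer expects, via `[dict(zip(data, t)) for t in zip(*data.values())]`. That dict-of-lists to list-of-dicts transform in isolation, reusing the docstring's example sentences:

data = {
    "src": ["Dem Feuer konnte Einhalt geboten werden"],
    "mt": ["The fire could be stopped"],
    "ref": ["They were able to control the fire."],
}

# zip(*data.values()) walks the columns in parallel, one row at a time.
rows = [dict(zip(data, row)) for row in zip(*data.values())]
print(rows[0]["mt"])  # The fire could be stopped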
680
'''simple docstring''' from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a : Any = 6_378_137.0 a : List[Any] = 6_356_752.314_245 a : Dict = 6_378_137 def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: __snake_case = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values __snake_case = (b_lata + b_lata) / 2 __snake_case = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2) __snake_case = cos(sigma / 2 ) ** 2 __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2) __snake_case = sin(sigma / 2 ) ** 2 __snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
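The two geodetic ingredients doing the real work above are the flattening of the ellipsoid and the parametric (reduced) latitude beta = atan((1 - f) * tan(phi)). A quick numeric check with the same WGS-84 axes:

from math import atan, degrees, radians, tan

AXIS_A = 6378137.0       # equatorial radius, m
AXIS_B = 6356752.314245  # polar radius, m

flattening = (AXIS_A - AXIS_B) / AXIS_A
print(round(flattening, 9))  # 0.003352811  (about 1/298.257)

phi = 45.0  # geodetic latitude in degrees
beta = degrees(atan((1 - flattening) * tan(radians(phi))))
print(round(beta, 4))  # 44.9039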
680
1
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device a : Optional[Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = generator.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def A ( self : Optional[int] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __snake_case = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
680
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray: # For applying gaussian function for each element in matrix. __snake_case = math.sqrt(_UpperCAmelCase ) __snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray: __snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase ): __snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray: __snake_case = np.zeros(img.shape ) __snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase ) __snake_case , __snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] __snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase ) __snake_case = val return imga def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple: __snake_case = args[1] if args[1:] else "../image_data/lena.jpg" __snake_case = float(args[2] ) if args[2:] else 1.0 __snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: __snake_case = int(args[4] ) __snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: __snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a , a , a , a : Tuple = parse_args(sys.argv) a : Tuple = cva.imread(filename, 0) cva.imshow('''input image''', img) a : Dict = img / 255 a : str = out.astype('''float32''') a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a : Dict = out * 255 a : List[str] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
680
1
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray: # For applying gaussian function for each element in matrix. __snake_case = math.sqrt(_UpperCAmelCase ) __snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray: __snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase ): __snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray: __snake_case = np.zeros(img.shape ) __snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase ) __snake_case , __snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] __snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase ) __snake_case = val return imga def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple: __snake_case = args[1] if args[1:] else "../image_data/lena.jpg" __snake_case = float(args[2] ) if args[2:] else 1.0 __snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: __snake_case = int(args[4] ) __snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: __snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a , a , a , a : Tuple = parse_args(sys.argv) a : Tuple = cva.imread(filename, 0) cva.imshow('''input image''', img) a : Dict = img / 255 a : str = out.astype('''float32''') a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a : Dict = out * 255 a : List[str] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
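The nested loop above multiplies a fixed spatial Gaussian kernel by a per-window intensity Gaussian, which is the core of a bilateral filter: pixels that are far away or very different in value contribute almost nothing. A compact sketch of that weighting on a single toy 3x3 window (NumPy only, values invented for illustration):

import math
import numpy as np

def gaussian(x, variance):
    sigma = math.sqrt(variance)
    return (1 / (sigma * math.sqrt(2 * math.pi))) * np.exp(-((x / sigma) ** 2) * 0.5)

# One bright outlier in an otherwise flat window; the center pixel is 0.5.
window = np.array([[0.5, 0.5, 0.5],
                   [0.5, 0.5, 1.0],
                   [0.5, 0.5, 0.5]])

dist = np.array([[math.hypot(i - 1, j - 1) for j in range(3)] for i in range(3)])
spatial = gaussian(dist, 1.0)                      # closeness in space
intensity = gaussian(window - window[1, 1], 0.01)  # closeness in value
weights = spatial * intensity

filtered = np.sum(weights * window) / np.sum(weights)
print(round(float(filtered), 4))  # 0.5: the outlier gets a near-zero range weight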
680
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ): """simple docstring""" __snake_case = name __snake_case = value __snake_case = weight def __repr__( self : Optional[int] ): """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def A ( self : Any ): """simple docstring""" return self.value def A ( self : str ): """simple docstring""" return self.name def A ( self : int ): """simple docstring""" return self.weight def A ( self : Tuple ): """simple docstring""" return self.value / self.weight def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]: __snake_case = [] for i in range(len(_UpperCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int: __snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase ) __snake_case = [] __snake_case , __snake_case = 0.0, 0.0 for i in range(len(_UpperCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __UpperCAmelCase ( ) -> Optional[Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
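The `greedy` routine above sorts items by a caller-supplied key and packs them until the budget is exhausted; with value-to-weight ratio as the key this is the classic knapsack heuristic. The same idea in isolation, with hypothetical items:

items = [("apple", 50, 25), ("banana", 30, 20), ("steak", 100, 70)]  # (name, value, weight)

# Highest value per unit of weight first.
by_ratio = sorted(items, key=lambda it: it[1] / it[2], reverse=True)

max_cost, total_value, total_weight, chosen = 80, 0.0, 0.0, []
for name, value, weight in by_ratio:
    if total_weight + weight <= max_cost:
        chosen.append(name)
        total_weight += weight
        total_value += value

print(chosen, total_value)  # ['apple', 'banana'] 80.0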
680
1
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable a : List[str] = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Union[str, Any] = ['''GPTNeoXTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Any = [ '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXForCausalLM''', '''GPTNeoXForQuestionAnswering''', '''GPTNeoXForSequenceClassification''', '''GPTNeoXForTokenClassification''', '''GPTNeoXLayer''', '''GPTNeoXModel''', '''GPTNeoXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys a : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
'''simple docstring''' import os from math import logaa def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int: __snake_case = 0 __snake_case = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ): __snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) ) if x * logaa(_UpperCAmelCase ) > largest: __snake_case = x * logaa(_UpperCAmelCase ) __snake_case = i + 1 return result if __name__ == "__main__": print(solution())
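Comparing a**b values directly would mean materialising enormous integers; the trick above is that a**b > c**d exactly when b * log10(a) > d * log10(c). A self-contained illustration (the pairs are hypothetical, standing in for the missing base_exp.txt):

from math import log10

pairs = [(2, 11), (3, 7), (10, 3)]  # (base, exponent) rows

# b * log10(a) orders the powers without ever computing them.
best = max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0]))
print(best + 1)  # 2: 3**7 = 2187 beats 2**11 = 2048 and 10**3 = 1000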
680
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Dict = { '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = [ '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Swinv2ForImageClassification''', '''Swinv2ForMaskedImageModeling''', '''Swinv2Model''', '''Swinv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_logger(__name__) a : Dict = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a : Any = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } a : Optional[int] = { '''facebook/blenderbot_small-90M''': 512, } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ): """simple docstring""" super().__init__( ByteLevelBPETokenizer( vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , ) __snake_case = add_prefix_space def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ): """simple docstring""" __snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
680
1
'''simple docstring''' import os from datetime import datetime as dt from github import Github a : Tuple = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def __UpperCAmelCase ( ) -> int: __snake_case = Github(os.environ["GITHUB_TOKEN"] ) __snake_case = g.get_repo("huggingface/diffusers" ) __snake_case = repo.get_issues(state="open" ) for issue in open_issues: __snake_case = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) __snake_case = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="closed" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="open" ) issue.remove_from_labels("stale" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) issue.add_to_labels("stale" ) if __name__ == "__main__": main()
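All of the bot's branching reduces to day counts on `datetime` deltas. The close-as-stale predicate in isolation (the timestamps are hypothetical):

from datetime import datetime, timedelta

now = datetime.utcnow()
updated_at = now - timedelta(days=10)  # hypothetical last activity
created_at = now - timedelta(days=45)  # hypothetical issue age

# Same thresholds as above: more than 7 quiet days on an issue at least 30 days old.
should_close = (now - updated_at).days > 7 and (now - created_at).days >= 30
print(should_close)  # True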
680
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : str = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """data2vec-text""" def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @property def A ( self : Any ): """simple docstring""" if self.task == "multiple-choice": __snake_case = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
680
'''simple docstring''' # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers a : Optional[Any] = float('''nan''') class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Optional[int] ): """simple docstring""" __snake_case = sys.stdout __snake_case = open(a_ , "a" ) def __getattr__( self : str , a_ : List[Any] ): """simple docstring""" return getattr(self.stdout , a_ ) def A ( self : Union[str, Any] , a_ : List[Any] ): """simple docstring""" self.stdout.write(a_ ) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) ) def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]: __snake_case = [] # deal with critical env vars __snake_case = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: __snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) __snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(_UpperCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __snake_case = [] __snake_case = "" while len(_UpperCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCAmelCase ) __snake_case = "" return "\\\n".join(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple: # unwrap multi-line input __snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own __snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir __snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , ) __snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams __snake_case = variation.replace(" " , "-" ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f: f.write(result.stdout ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f: __snake_case = json.load(_UpperCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict: __snake_case = [] __snake_case = [] __snake_case = F'''{id}: {variation:<{longest_variation_len}}''' __snake_case = F'''{preamble}: ''' __snake_case = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ): __snake_case = process_run_single( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = single_run_metrics[target_metric_key] if not math.isnan(_UpperCAmelCase ): metrics.append(_UpperCAmelCase ) results.append(_UpperCAmelCase ) outcome += "✓" else: outcome += "✘" __snake_case = F'''\33[2K\r{outcome}''' if len(_UpperCAmelCase ) > 0: __snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __snake_case = round(mean_metrics[target_metric_key] , 2 ) __snake_case = F'''{outcome} {mean_target}''' if len(_UpperCAmelCase ) > 1: results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}''' print(_UpperCAmelCase ) __snake_case = variation return mean_metrics else: print(_UpperCAmelCase ) return {variation_key: variation, target_metric_key: nan} def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]: __snake_case = pd.DataFrame(_UpperCAmelCase ) __snake_case = "variation" __snake_case = "diff_%" __snake_case = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCAmelCase 
): # as a fallback, use the minimal value as the sentinel __snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCAmelCase ): __snake_case = df.apply( lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns __snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys] __snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols # capitalize __snake_case = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" ) __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" ) __snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] print("\n\n".join(_UpperCAmelCase ) ) def __UpperCAmelCase ( ) -> Dict: __snake_case = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , ) parser.add_argument( "--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) __snake_case = parser.parse_args() __snake_case = args.output_dir Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) __snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase ) # split each dimension into its --foo variations __snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) ) __snake_case = max(len(_UpperCAmelCase ) for x in variations ) # split wanted keys __snake_case = args.report_metric_keys.split() # capture prints into a log file for convenience __snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) __snake_case = Tee(_UpperCAmelCase ) print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' ) print(F'''Base command: {" ".join(_UpperCAmelCase )}''' ) __snake_case = "variation" __snake_case = [] for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ): __snake_case = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) ) process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase ) if __name__ == "__main__": main()
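The `--variations` dimensions are expanded into a full cartesian product before anything is run. That expansion step on its own, using the example dimensions from the header comment:

import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]  # "" relies on the default

variations = [" ".join(parts).strip() for parts in itertools.product(*dims)]
for v in variations:
    print(v)
# --tf32 0
# --tf32 0 --fp16
# --tf32 0 --bf16
# ... six combinations in total, matching the table in the header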
680
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available a : List[str] = { '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ErnieForCausalLM''', '''ErnieForMaskedLM''', '''ErnieForMultipleChoice''', '''ErnieForNextSentencePrediction''', '''ErnieForPreTraining''', '''ErnieForQuestionAnswering''', '''ErnieForSequenceClassification''', '''ErnieForTokenClassification''', '''ErnieModel''', '''ErniePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys a : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( ) -> Dict: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case = [1, 2, 3] with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 ) with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]: __snake_case = [1, 2] __snake_case = {"a": 1, "b": 2} __snake_case = {"a": [1, 2], "b": [3, 4]} __snake_case = {"a": {"1": 1}, "b": 2} __snake_case = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case = [2, 3] __snake_case = {"a": 2, "b": 3} __snake_case = {"a": [2, 3], "b": [4, 5]} __snake_case = {"a": {"1": 2}, "b": 3} __snake_case = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
680
1
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : Dict , a_ : str , a_ : Tuple ): """simple docstring""" super().__init__() self.register_modules(unet=a_ , scheduler=a_ ) @torch.no_grad() def __call__( self : Tuple , a_ : int = 1 , a_ : Optional[torch.Generator] = None , a_ : int = 50 , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : Dict , ): """simple docstring""" __snake_case = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , ) __snake_case = image.to(self.device ) # set step values self.scheduler.set_timesteps(a_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __snake_case = self.unet(a_ , a_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __snake_case = self.scheduler.step(a_ , a_ , a_ ).prev_sample __snake_case = (image / 2 + 0.5).clamp(0 , 1 ) __snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __snake_case = self.numpy_to_pil(a_ ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=a_ ), "This is a local test"
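Stripped of the pipeline plumbing, `__call__` above is the standard reverse-diffusion skeleton: start from Gaussian noise, repeatedly predict a residual and step towards the clean sample, then map to [0, 1]. A toy stand-in (the `unet` below is a fake denoiser and the update rule is invented; a real sampler would use the scheduler's step equation):

import torch

unet = lambda x, t: 0.1 * x  # fake noise predictor
num_steps = 50

image = torch.randn(1, 3, 8, 8)        # start from pure Gaussian noise
for t in range(num_steps):
    noise_pred = unet(image, t)        # 1. predict the noise residual
    image = image - noise_pred         # 2. toy update towards the clean sample
image = (image / 2 + 0.5).clamp(0, 1)  # map to [0, 1], as the pipeline does
print(image.shape)  # torch.Size([1, 3, 8, 8])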
680
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : int = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """mobilenet_v2""" def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ): """simple docstring""" super().__init__(**a_ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) __snake_case = num_channels __snake_case = image_size __snake_case = depth_multiplier __snake_case = depth_divisible_by __snake_case = min_depth __snake_case = expand_ratio __snake_case = output_stride __snake_case = first_layer_is_expansion __snake_case = finegrained_output __snake_case = hidden_act __snake_case = tf_padding __snake_case = classifier_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = version.parse("""1.11""" ) @property def A ( self : Optional[int] ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def A ( self : Optional[int] ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def A ( self : int ): """simple docstring""" return 1e-4
680
1
'''simple docstring''' import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger a : Optional[int] = get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( enum.Enum ): __SCREAMING_SNAKE_CASE = """all_checks""" __SCREAMING_SNAKE_CASE = """basic_checks""" __SCREAMING_SNAKE_CASE = """no_checks""" class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass def __UpperCAmelCase ( _UpperCAmelCase : Optional[dict] , _UpperCAmelCase : dict , _UpperCAmelCase : int=None ) -> Union[str, Any]: if expected_checksums is None: logger.info("Unable to verify checksums." ) return if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) ) if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0: raise UnexpectedDownloadedFile(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) ) __snake_case = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] __snake_case = " for " + verification_name if verification_name is not None else "" if len(_UpperCAmelCase ) > 0: raise NonMatchingChecksumError( F'''Checksums didn\'t match{for_verification_name}:\n''' F'''{bad_urls}\n''' "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" ) logger.info("All the checksums matched successfully" + for_verification_name ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): pass def __UpperCAmelCase ( _UpperCAmelCase : Optional[dict] , _UpperCAmelCase : dict ) -> List[str]: if expected_splits is None: logger.info("Unable to verify splits sizes." ) return if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0: raise ExpectedMoreSplits(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) ) if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0: raise UnexpectedSplits(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) ) __snake_case = [ {"expected": expected_splits[name], "recorded": recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(_UpperCAmelCase ) > 0: raise NonMatchingSplitsSizesError(str(_UpperCAmelCase ) ) logger.info("All the splits matched successfully." ) def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : bool = True ) -> dict: if record_checksum: __snake_case = shaaaa() with open(_UpperCAmelCase , "rb" ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , B"" ): m.update(_UpperCAmelCase ) __snake_case = m.hexdigest() else: __snake_case = None return {"num_bytes": os.path.getsize(_UpperCAmelCase ), "checksum": checksum} def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Dict: if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
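`get_size_checksum_dict` hashes files in 1 MiB chunks so a large download never has to fit in memory. The streaming pattern on its own (an in-memory stream stands in for a real file opened with open(path, "rb")):

import hashlib
import io

stream = io.BytesIO(b"example payload" * 1000)

m = hashlib.sha256()
for chunk in iter(lambda: stream.read(1 << 20), b""):  # read 1 MiB at a time
    m.update(chunk)
print(m.hexdigest()[:16])  # first characters of the digest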
680
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """data2vec-text""" def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @property def A ( self : Any ): """simple docstring""" if self.task == "multiple-choice": __snake_case = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
680
1
'''simple docstring''' from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""] __SCREAMING_SNAKE_CASE = """Pix2StructImageProcessor""" __SCREAMING_SNAKE_CASE = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self : List[str] , a_ : Dict , a_ : List[str] ): """simple docstring""" __snake_case = False super().__init__(a_ , a_ ) def __call__( self : List[Any] , a_ : int=None , a_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = None , a_ : Optional[int] = None , a_ : Optional[int] = 2_048 , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : str , ): """simple docstring""" if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None and not self.image_processor.is_vqa: __snake_case = self.tokenizer __snake_case = self.tokenizer( text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_token_type_ids=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values __snake_case = self.image_processor( a_ , return_tensors=a_ , max_patches=a_ , **a_ ) else: # add pixel_values and bbox __snake_case = self.image_processor( a_ , return_tensors=a_ , max_patches=a_ , header_text=a_ , **a_ ) if text is not None and not self.image_processor.is_vqa: __snake_case = self.tokenizer( text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_token_type_ids=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , ) if "attention_mask" in text_encoding: __snake_case = text_encoding.pop("attention_mask" ) if "input_ids" in text_encoding: __snake_case = text_encoding.pop("input_ids" ) else: __snake_case = None if text_encoding is not None: encoding_image_processor.update(a_ ) return encoding_image_processor def A ( self : str , *a_ : List[str] , **a_ : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def A ( self : Union[str, Any] , *a_ : Optional[int] , **a_ : str ): """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def A ( self : List[str] ): """simple docstring""" __snake_case = self.tokenizer.model_input_names __snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a : Tuple = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ): """simple docstring""" __snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] ) __snake_case = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , a_ : int ): """simple docstring""" super().__init__(a_ ) __snake_case = BertEncoderWithPabee(a_ ) self.init_weights() __snake_case = 0 __snake_case = 0 __snake_case = 0 __snake_case = 0 def A ( self : Optional[int] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = threshold def A ( self : Optional[Any] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = patience def A ( self : Any ): """simple docstring""" __snake_case = 0 __snake_case = 0 def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.inference_layers_num / self.inference_instances_num __snake_case = ( f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(a_ ) @add_start_docstrings_to_model_forward(a_ ) def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __snake_case = input_ids.size() elif inputs_embeds is not None: __snake_case = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) if token_type_ids is None: __snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __snake_case , __snake_case , __snake_case = encoder_hidden_states.size() __snake_case = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) __snake_case = self.invert_attention_mask(a_ ) else: __snake_case = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers ) __snake_case = self.embeddings( input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ ) __snake_case = embedding_output if self.training: __snake_case = [] for i in range(self.config.num_hidden_layers ): __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](output_dropout(a_ ) ) res.append(a_ ) elif self.patience == 0: # Use all layers for inference __snake_case = self.encoder( a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case = self.pooler(encoder_outputs[0] ) __snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )] else: __snake_case = 0 __snake_case = None __snake_case = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](a_ ) if regression: __snake_case = logits.detach() if patient_result is not None: __snake_case = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __snake_case = 0 else: __snake_case = logits.detach().argmax(dim=1 ) if patient_result is not None: __snake_case = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(a_ ) ): patient_counter += 1 else: __snake_case = 0 __snake_case = logits if patient_counter == self.patience: break __snake_case = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : List[str] , a_ : Tuple ): """simple docstring""" super().__init__(a_ ) __snake_case = config.num_labels __snake_case = BertModelWithPabee(a_ ) __snake_case = nn.Dropout(config.hidden_dropout_prob ) __snake_case = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(a_ ) def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ): """simple docstring""" __snake_case = self.bert( input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __snake_case = (logits[-1],) if labels is not None: __snake_case = None __snake_case = 0 for ix, logits_item in enumerate(a_ ): if self.num_labels == 1: # We are doing regression __snake_case = MSELoss() __snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __snake_case = CrossEntropyLoss() __snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __snake_case = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __snake_case = (total_loss / total_weights,) + outputs return outputs
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in GitHub format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
#     --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
#     --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
#     --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
#     --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# The leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
#     --base-cmd \
#     ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
#     --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
#     --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
#     --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
#     --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
#     --source_prefix "translate English to Romanian: " --warmup_steps 50 \
#     --max_train_samples 20000 --dataloader_num_workers 2 ' \
#     --target-metric-key train_samples_per_second --repeat-times 1 --variations \
#     '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
#     --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
# | Variation       |     Train |   Diff |   Train |
# |                 |   samples |      % |    loss |
# |                 |       per |        |         |
# |                 |    second |        |         |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0        |    285.11 |      0 |    2.51 |
# | --tf32 1        |    342.09 |     20 |    2.51 |
# | --fp16 --tf32 0 |    423.49 |     49 |    2.51 |
# | --fp16 --tf32 1 |    423.13 |     48 |    2.51 |
# | --bf16 --tf32 0 |    416.80 |     46 |    2.52 |
# | --bf16 --tf32 1 |    415.87 |     46 |    2.52 |
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers a : Optional[Any] = float('''nan''') class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Optional[int] ): """simple docstring""" __snake_case = sys.stdout __snake_case = open(a_ , "a" ) def __getattr__( self : str , a_ : List[Any] ): """simple docstring""" return getattr(self.stdout , a_ ) def A ( self : Union[str, Any] , a_ : List[Any] ): """simple docstring""" self.stdout.write(a_ ) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) ) def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]: __snake_case = [] # deal with critical env vars __snake_case = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: __snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) __snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(_UpperCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __snake_case = [] __snake_case = "" while len(_UpperCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCAmelCase ) __snake_case = "" return "\\\n".join(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple: # unwrap multi-line input __snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own __snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir __snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , ) __snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams __snake_case = variation.replace(" " , "-" ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f: f.write(result.stdout ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f: __snake_case = json.load(_UpperCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict: __snake_case = [] __snake_case = [] __snake_case = F'''{id}: {variation:<{longest_variation_len}}''' __snake_case = F'''{preamble}: ''' __snake_case = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ): __snake_case = process_run_single( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = single_run_metrics[target_metric_key] if not math.isnan(_UpperCAmelCase ): metrics.append(_UpperCAmelCase ) results.append(_UpperCAmelCase ) outcome += "✓" else: outcome += "✘" __snake_case = F'''\33[2K\r{outcome}''' if len(_UpperCAmelCase ) > 0: __snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __snake_case = round(mean_metrics[target_metric_key] , 2 ) __snake_case = F'''{outcome} {mean_target}''' if len(_UpperCAmelCase ) > 1: results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}''' print(_UpperCAmelCase ) __snake_case = variation return mean_metrics else: print(_UpperCAmelCase ) return {variation_key: variation, target_metric_key: nan} def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]: __snake_case = pd.DataFrame(_UpperCAmelCase ) __snake_case = "variation" __snake_case = "diff_%" __snake_case = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCAmelCase 
): # as a fallback, use the minimal value as the sentinel __snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCAmelCase ): __snake_case = df.apply( lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns __snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys] __snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols # capitalize __snake_case = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" ) __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" ) __snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] print("\n\n".join(_UpperCAmelCase ) ) def __UpperCAmelCase ( ) -> Dict: __snake_case = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , ) parser.add_argument( "--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) __snake_case = parser.parse_args() __snake_case = args.output_dir Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) __snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase ) # split each dimension into its --foo variations __snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) ) __snake_case = max(len(_UpperCAmelCase ) for x in variations ) # split wanted keys __snake_case = args.report_metric_keys.split() # capture prints into a log file for convenience __snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) __snake_case = Tee(_UpperCAmelCase ) print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' ) print(F'''Base command: {" ".join(_UpperCAmelCase )}''' ) __snake_case = "variation" __snake_case = [] for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ): __snake_case = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) ) process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase ) if __name__ == "__main__": main()
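# Editorial sketch: the heart of the benchmark driver is expanding the
# '|'-separated --variations dimensions into a cartesian product, exactly as
# the main() above does with re.split and itertools.product:
import itertools
import re

dims = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]  # two dimensions, 2 x 3 options
dims = [list(map(str.strip, re.split(r"\|", d))) for d in dims]
variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']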
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = backbone_out_indices __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = initializer_range __snake_case = num_labels __snake_case = backbone_featmap_shape __snake_case = scope __snake_case = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __snake_case = (image_size // patch_size) ** 2 __snake_case = num_patches + 1 def A ( self : int ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def A ( self : Optional[Any] ): """simple docstring""" __snake_case = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ): """simple docstring""" __snake_case = DPTModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForDepthEstimation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def A ( self : List[Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( { """depth-estimation""": DPTForDepthEstimation, """feature-extraction""": DPTModel, """image-segmentation""": DPTForSemanticSegmentation, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : Optional[Any] ): """simple docstring""" __snake_case = DPTModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def A ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds" ) def A ( self : Any ): """simple docstring""" pass def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def A ( self : List[str] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) def A ( self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def A ( self : Optional[int] ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True if model_class in get_values(a_ ): continue __snake_case = model_class(a_ ) model.to(a_ ) model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : int ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = False __snake_case = True if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue __snake_case = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = _config_zero_init(a_ ) for model_class in self.all_model_classes: __snake_case = model_class(config=a_ ) # Skip the check for the backbone __snake_case = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : Tuple ): """simple docstring""" pass @slow def A ( self : int ): """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __snake_case = DPTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def A ( self : int ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = "add" with self.assertRaises(a_ ): __snake_case = DPTForDepthEstimation(a_ ) def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Dict ): """simple docstring""" __snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" ) __snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ ) __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.predicted_depth # verify the predicted depth __snake_case = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , a_ ) __snake_case = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
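# Editorial sketch mirroring the integration test above: monocular depth
# estimation with the public Intel/dpt-hybrid-midas checkpoint.
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))
print(outputs.predicted_depth.shape)  # torch.Size([1, 384, 384])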
'''simple docstring''' import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() a : int = logging.get_logger(__name__) a : List[Any] = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> Optional[Any]: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' ) if tokenizer_name is None: __snake_case = TOKENIZER_CLASSES else: __snake_case = {tokenizer_name: getattr(_UpperCAmelCase , tokenizer_name + "Fast" )} logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' ) for tokenizer_name in tokenizer_names: __snake_case = TOKENIZER_CLASSES[tokenizer_name] __snake_case = True if checkpoint_name is None: __snake_case = list(tokenizer_class.max_model_input_sizes.keys() ) else: __snake_case = [checkpoint_name] logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' ) for checkpoint in checkpoint_names: logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' ) # Load tokenizer __snake_case = tokenizer_class.from_pretrained(_UpperCAmelCase , force_download=_UpperCAmelCase ) # Save fast tokenizer logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' ) # For organization names we create sub-directories if "/" in checkpoint: __snake_case , __snake_case = checkpoint.split("/" ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) elif add_prefix: __snake_case = checkpoint __snake_case = dump_path else: __snake_case = None __snake_case = dump_path logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __snake_case = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __snake_case = file_path.split(_UpperCAmelCase )[-1][0] if next_char == "/": __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = None logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) __snake_case = tokenizer.save_pretrained( _UpperCAmelCase , legacy_format=_UpperCAmelCase , filename_prefix=_UpperCAmelCase ) logger.info(F'''=> File names {file_names}''' ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(_UpperCAmelCase ) logger.info(F'''=> removing {file_name}''' ) if __name__ == "__main__": a : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) a : List[str] = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
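# Editorial note: the __main__ block calls convert_slow_checkpoint_to_fast,
# which must be the (obfuscated) __UpperCAmelCase function defined above. A
# hedged direct invocation, with illustrative tokenizer/checkpoint names:
convert_slow_checkpoint_to_fast(
    "BertTokenizer",      # tokenizer_name: a key of TOKENIZER_CLASSES
    "bert-base-uncased",  # checkpoint_name
    "./fast_tokenizers",  # dump_path
    False,                # force_download
)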
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class SCREAMING_SNAKE_CASE__:
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = 1
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = None

    def A(self: Any):
        """simple docstring"""
        # Deep-copy every stored field; the original flattened line deep-copied
        # the undefined name `a_` instead of the dict value `v`.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
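# Editorial sketch of the deep-copy pattern used by method `A` above, with a
# hypothetical dataclass standing in for the obfuscated one:
import copy
from dataclasses import dataclass

@dataclass
class ConfigSketch:  # hypothetical stand-in
    max_length: int = 1
    do_sample: bool = False

    def clone(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

cfg = ConfigSketch(max_length=8)
dup = cfg.clone()
assert dup == cfg and dup is not cfg  # equal by value, distinct objects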
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def __UpperCAmelCase ( _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : List[str]=1_00 , _UpperCAmelCase : Optional[int]=10_26 , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict="data/tokenized_stories_train_wikitext103.jbl" , _UpperCAmelCase : Optional[Any]="igf_context_pairs.jbl" , ) -> Any: set_seed(3 ) # generate train_data and objective_set __snake_case , __snake_case = generate_datasets( _UpperCAmelCase , _UpperCAmelCase , number=_UpperCAmelCase , min_len=10_26 , trim=_UpperCAmelCase ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model __snake_case = load_gpta("gpt2" ).to(_UpperCAmelCase ) print("computing perplexity on objective set" ) __snake_case = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).item() print("perplexity on objective set:" , _UpperCAmelCase ) # collect igf pairs and save to file demo.jbl collect_objective_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any=15 , _UpperCAmelCase : Union[str, Any]=1_28 , _UpperCAmelCase : List[str]=1_00 , _UpperCAmelCase : Optional[Any]="igf_model.pt" , ) -> Any: set_seed(42 ) # Load pre-trained model __snake_case = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model __snake_case = SecondaryLearner(_UpperCAmelCase ) # Train secondary learner __snake_case = train_secondary_learner( _UpperCAmelCase , _UpperCAmelCase , max_epochs=_UpperCAmelCase , batch_size=_UpperCAmelCase , eval_freq=1_00 , igf_model_path=_UpperCAmelCase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : List[str]=10_00 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Optional[int]=recopy_gpta , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : int=10 , _UpperCAmelCase : str="gpt2_finetuned.pt" , ) -> int: __snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) __snake_case = RandomSampler(_UpperCAmelCase ) __snake_case = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase ) __snake_case = max_steps // (len(_UpperCAmelCase )) + 1 __snake_case = 0 __snake_case = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCAmelCase ) __snake_case , __snake_case , __snake_case = recopy_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) model.train() if secondary_learner is not None: secondary_learner.to(_UpperCAmelCase ) secondary_learner.eval() __snake_case = [] __snake_case = 0 __snake_case = [] 
__snake_case = [] # Compute the performance of the transformer model at the beginning __snake_case = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) test_perps.append(_UpperCAmelCase ) print("Test perplexity, step" , _UpperCAmelCase , ":" , _UpperCAmelCase ) for epoch in range(int(_UpperCAmelCase ) ): for step, example in enumerate(_UpperCAmelCase ): torch.cuda.empty_cache() __snake_case = random.randint(0 , example.size(2 ) - context_len - 1 ) __snake_case = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __snake_case = model(_UpperCAmelCase , labels=_UpperCAmelCase ) __snake_case = True if secondary_learner is not None: __snake_case = secondary_learner.forward( torch.tensor(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_UpperCAmelCase ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: __snake_case = -1 if predicted_q < threshold: __snake_case = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __snake_case = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __snake_case = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __snake_case = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) test_perps.append(_UpperCAmelCase ) print("Test perplexity, step" , _UpperCAmelCase , ":" , _UpperCAmelCase ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _UpperCAmelCase ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def __UpperCAmelCase ( ) -> int: __snake_case = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The output directory where the final fine-tuned model is stored." 
, ) parser.add_argument( "--tokenizer_name" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=_UpperCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=1_00 , type=_UpperCAmelCase , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=1_00 , type=_UpperCAmelCase , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=10_00 , type=_UpperCAmelCase , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=1_28 , type=_UpperCAmelCase , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=_UpperCAmelCase , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=_UpperCAmelCase , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=1_00 , type=_UpperCAmelCase , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=10_26 , type=_UpperCAmelCase , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=_UpperCAmelCase , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=_UpperCAmelCase , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_UpperCAmelCase , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_UpperCAmelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner __snake_case = joblib.load("data/IGF_values.jbl" ) # Train secondary learner __snake_case = training_secondary_learner( _UpperCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model __snake_case = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __snake_case , __snake_case = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=_UpperCAmelCase ) # fine-tuning of the gpt2 
model using igf (Information Gain Filtration) finetune( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCAmelCase , secondary_learner=_UpperCAmelCase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
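# Editorial sketch of the IGF filtering step inside `finetune` above: a context
# is backpropagated only if the secondary learner's predicted IG(X) clears the
# threshold, and one LM update fires per `batch_size` accepted contexts.
# `predict_ig` is a hypothetical callable standing in for the secondary learner.
def fill_filtered_batches(contexts, predict_ig, threshold=1.0, batch_size=16):
    batch = []
    for context in contexts:
        if predict_ig(context) >= threshold:   # informative enough: keep it
            batch.append(context)
        if len(batch) == batch_size:
            yield batch                        # caller performs one LM step here
            batch = []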
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device a : Optional[Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = generator.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def A ( self : Optional[int] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __snake_case = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
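# Editorial sketch mirroring the nightly fp16 test above; requires a CUDA
# device and downloads the shi-labs/versatile-diffusion weights.
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = pipe(
    prompt="A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("squirrel.png")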
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs a : List[Any] = imread(r'''digital_image_processing/image_data/lena_small.jpg''') a : Any = cvtColor(img, COLOR_BGR2GRAY) def __UpperCAmelCase ( ) -> Tuple: __snake_case = cn.convert_to_negative(_UpperCAmelCase ) # assert negative_img array for at least one True assert negative_img.any() def __UpperCAmelCase ( ) -> List[Any]: with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(_UpperCAmelCase , 1_10 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def __UpperCAmelCase ( ) -> List[Any]: __snake_case = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def __UpperCAmelCase ( ) -> int: __snake_case = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() __snake_case = canny.canny(_UpperCAmelCase ) # assert canny array for at least one True assert canny_array.any() def __UpperCAmelCase ( ) -> Optional[Any]: assert gg.gaussian_filter(_UpperCAmelCase , 5 , sigma=0.9 ).all() def __UpperCAmelCase ( ) -> Union[str, Any]: # laplace diagonals __snake_case = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __snake_case = conv.img_convolve(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase ) assert res.any() def __UpperCAmelCase ( ) -> str: assert med.median_filter(_UpperCAmelCase , 3 ).any() def __UpperCAmelCase ( ) -> Optional[Any]: __snake_case , __snake_case = sob.sobel_filter(_UpperCAmelCase ) assert grad.any() and theta.any() def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = sp.make_sepia(_UpperCAmelCase , 20 ) assert sepia.all() def __UpperCAmelCase ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> str: __snake_case = bs.Burkes(imread(_UpperCAmelCase , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def __UpperCAmelCase ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Dict: __snake_case = rs.NearestNeighbour(imread(_UpperCAmelCase , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. 
__snake_case = imread(_UpperCAmelCase , 0 ) # Test for get_neighbors_pixel function() return not None __snake_case = 0 __snake_case = 0 __snake_case = image[x_coordinate][y_coordinate] __snake_case = lbp.get_neighbors_pixel( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __snake_case = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __snake_case = lbp.local_binary_value(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) assert lbp_image.any()
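# Editorial note: `cva` in the imports above is the obfuscated alias of
# OpenCV's `cv2`. In the original TheAlgorithms repository the same helpers
# are used like this (fixture paths are the repo's own):
from cv2 import imread
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp

img = imread("digital_image_processing/image_data/lena_small.jpg")
negative = cn.convert_to_negative(img)  # per-pixel inversion
sepia = sp.make_sepia(img, 20)          # sepia tone with factor 20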
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : Any = get_logger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving model to {ckpt_dir}''' ) __snake_case = {"model": state_dict} dist_cp.save_state_dict( state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: 
__snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = ( os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) __snake_case = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , ) __snake_case = state_dict["model"] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: __snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: __snake_case = ( os.path.join(_UpperCAmelCase , 
F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) __snake_case = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , ) __snake_case = optim_state["optimizer"] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) __snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) optimizer.load_state_dict(_UpperCAmelCase )
680
1
'''simple docstring''' import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a : List[str] = argparse.ArgumentParser( description=( '''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned''' ''' Distillation''' ) ) parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2''']) parser.add_argument('''--model_name''', default='''roberta-large''', type=str) parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str) parser.add_argument('''--vocab_transform''', action='''store_true''') a : List[str] = parser.parse_args() if args.model_type == "roberta": a : List[str] = RobertaForMaskedLM.from_pretrained(args.model_name) a : List[str] = '''roberta''' elif args.model_type == "gpt2": a : int = GPTaLMHeadModel.from_pretrained(args.model_name) a : Union[str, Any] = '''transformer''' a : List[str] = model.state_dict() a : List[Any] = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a : Any = state_dict[F'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a : str = F'''{prefix}.embeddings.{w}.weight''' a : Optional[Any] = state_dict[param_name] for w in ["weight", "bias"]: a : List[Any] = F'''{prefix}.embeddings.LayerNorm.{w}''' a : List[str] = state_dict[param_name] # Transformer Blocks # a : int = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a : int = state_dict[ F'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] a : List[Any] = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a : List[str] = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a : str = state_dict[F'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: a : List[Any] = state_dict[F'''lm_head.dense.{w}'''] a : int = state_dict[F'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a : str = state_dict[F'''{prefix}.ln_f.{w}'''] a : Union[str, Any] = state_dict['''lm_head.weight'''] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
680
'''simple docstring'''
def __UpperCAmelCase(_UpperCAmelCase: int, _UpperCAmelCase: int) -> str:
    if not isinstance(_UpperCAmelCase, _UpperCAmelCase):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(_UpperCAmelCase, _UpperCAmelCase) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    __snake_case = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(_UpperCAmelCase)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
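A readable reconstruction of the masked function above, with assumed names, and its output for the classic 1..15 run:

def fizz_buzz(number: int, iterations: int) -> str:
    # Hypothetical de-masked version of the row above; same control flow.
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if number % 3 and number % 5:
            out += str(number)
        number += 1
        out += " "
    return out


print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz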
680
1
'''simple docstring'''
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137


def __UpperCAmelCase(_UpperCAmelCase: float, _UpperCAmelCase: float, _UpperCAmelCase: float, _UpperCAmelCase: float) -> float:
    __snake_case = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase)))
    __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase)))

    # Compute the central angle between the two points using the haversine
    # theta: sigma = haversine_distance / equatorial radius
    __snake_case = haversine_distance(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    __snake_case = (b_lata + b_lata) / 2
    __snake_case = (b_lata - b_lata) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    __snake_case = (sin(_UpperCAmelCase) ** 2) * (cos(_UpperCAmelCase) ** 2)
    __snake_case = cos(sigma / 2) ** 2
    __snake_case = (sigma - sin(_UpperCAmelCase)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    __snake_case = (cos(_UpperCAmelCase) ** 2) * (sin(_UpperCAmelCase) ** 2)
    __snake_case = sin(sigma / 2) ** 2
    __snake_case = (sigma + sin(_UpperCAmelCase)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
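A self-contained sketch of the same Lambert's-formula computation, with readable names assumed. The haversine helper is inlined (on a sphere of equatorial radius) since the relative import above is unavailable here; the repository's own helper may differ slightly. The test coordinates and the ~254 km figure are illustrative:

from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0         # equatorial radius (WGS-84), metres
AXIS_B = 6356752.314245    # polar radius, metres
EQUATORIAL_RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Great-circle distance on a sphere of equatorial radius.
    d_lat, d_lon = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(d_lat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(d_lon / 2) ** 2
    return 2 * EQUATORIAL_RADIUS * asin(sqrt(h))


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))  # parametric latitudes
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    p = (b_lat1 + b_lat2) / 2
    q = (b_lat2 - b_lat1) / 2
    x = (sigma - sin(sigma)) * (sin(p) ** 2 * cos(q) ** 2) / cos(sigma / 2) ** 2
    y = (sigma + sin(sigma)) * (cos(p) ** 2 * sin(q) ** 2) / sin(sigma / 2) ** 2
    return EQUATORIAL_RADIUS * (sigma - (flattening / 2) * (x + y))


# San Francisco to Yosemite, roughly 254 km in a straight line.
print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))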
680
'''simple docstring'''
def __UpperCAmelCase(_UpperCAmelCase: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    __snake_case = len(bin(_UpperCAmelCase)[3:])
    __snake_case = bin(abs(_UpperCAmelCase) - (1 << binary_number_length))[3:]
    __snake_case = (
        ("1" + "0" * (binary_number_length - len(_UpperCAmelCase)) + twos_complement_number)
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
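A worked example of the same trick with assumed readable names: `bin(n)[3:]` strips the `-0b` prefix, so `width` is the number of bits needed for `|n|`, and `abs(n) - 2**width` is a negative number whose binary digits are the low bits of the two's complement:

def twos_complement(number: int) -> str:
    # Hypothetical de-masked wrapper around the row above.
    if number > 0:
        raise ValueError("input must be a negative integer")
    width = len(bin(number)[3:])                  # bin(-5) == '-0b101' -> width 3
    body = bin(abs(number) - (1 << width))[3:]
    return "0b" + (("1" + "0" * (width - len(body)) + body) if number < 0 else "0")


print(twos_complement(-5))   # 0b1011   (-5 in 4-bit two's complement)
print(twos_complement(-17))  # 0b101111 (-17 in 6-bit two's complement)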
680
1
'''simple docstring'''
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def __UpperCAmelCase(_UpperCAmelCase: Union[str, Any], _UpperCAmelCase: Any, _UpperCAmelCase: Any) -> int:
    __snake_case = hf_hub_url(repo_id=_UpperCAmelCase, path=_UpperCAmelCase, revision=_UpperCAmelCase)
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_UpperCAmelCase)}'''
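The URL shape the test asserts, written out for one concrete combination taken from the parametrize lists above (note that `quote` percent-encodes the blanks):

from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
print(url)
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv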
680
'''simple docstring'''
from timeit import timeit


def __UpperCAmelCase(_UpperCAmelCase: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    __snake_case = 0
    while number:
        number &= number - 1
        result += 1
    return result


def __UpperCAmelCase(_UpperCAmelCase: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    __snake_case = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def __UpperCAmelCase() -> None:
    def do_benchmark(_UpperCAmelCase: int) -> None:
        __snake_case = "import __main__ as z"
        print(F'''Benchmark when {number = }:''')
        print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase) = }''')
        __snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=_UpperCAmelCase)
        print(F'''timeit() runs in {timing} seconds''')
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase) = }''')
        __snake_case = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=_UpperCAmelCase,
        )
        print(F'''timeit() runs in {timing} seconds''')

    for number in (25, 37, 58, 0):
        do_benchmark(_UpperCAmelCase)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
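The first function above is Brian Kernighan's algorithm: `n & (n - 1)` clears the lowest set bit, so the loop runs once per set bit rather than once per bit position. A minimal de-masked sketch (names assumed):

def popcount(n: int) -> int:
    # One iteration per set bit: n & (n - 1) drops the lowest 1-bit.
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count


assert popcount(25) == 3   # 25 == 0b11001
assert popcount(0) == 0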
680
1
'''simple docstring''' import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow a : Dict = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Union[str, Any] , a_ : List[Any]=32 ): """simple docstring""" set_seed(0 ) __snake_case = UNetaDModel(sample_size=a_ , in_channels=3 , out_channels=3 ) __snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 ) return model, optimizer @slow def A ( self : Optional[int] ): """simple docstring""" __snake_case = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable __snake_case = DDPMScheduler( num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=a_ , ) __snake_case = DDIMScheduler( num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=a_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) __snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(a_ ) for _ in range(4 )] __snake_case = [torch.randn((4, 3, 32, 32) ).to(a_ ) for _ in range(4 )] __snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(a_ ) for _ in range(4 )] # train with a DDPM scheduler __snake_case , __snake_case = self.get_model_optimizer(resolution=32 ) model.train().to(a_ ) for i in range(4 ): optimizer.zero_grad() __snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) __snake_case = model(a_ , timesteps[i] ).sample __snake_case = torch.nn.functional.mse_loss(a_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM __snake_case , __snake_case = self.get_model_optimizer(resolution=32 ) model.train().to(a_ ) for i in range(4 ): optimizer.zero_grad() __snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) __snake_case = model(a_ , timesteps[i] ).sample __snake_case = torch.nn.functional.mse_loss(a_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(a_ , a_ , atol=1e-5 ) ) self.assertTrue(torch.allclose(a_ , a_ , atol=1e-5 ) )
680
'''simple docstring''' import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch a : Dict = '''sshleifer/bart-tiny-random''' a : str = '''patrickvonplaten/t5-tiny-random''' @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : Union[str, Any] ): """simple docstring""" return AutoConfig.from_pretrained(a_ ) def A ( self : str ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) def A ( self : Dict ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def A ( self : Optional[int] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def A ( self : Dict ): """simple docstring""" with self.assertRaises(a_ ): create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
680
1
'''simple docstring'''
from PIL import Image


def __UpperCAmelCase(_UpperCAmelCase: Image, _UpperCAmelCase: int) -> Image:
    __snake_case = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(_UpperCAmelCase: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(_UpperCAmelCase)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        a : Dict = change_contrast(img, 170)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
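The magic numbers above are the standard contrast-correction factor F = 259(C + 255) / (255(259 - C)) for a contrast level C in [-255, 255]; each channel value c is then remapped as c' = 128 + F(c - 128), pivoting around mid-grey. A quick check:

def contrast_factor(level: int) -> float:
    return (259 * (level + 255)) / (255 * (259 - level))


assert contrast_factor(0) == 1.0  # level 0 leaves the image unchanged
print(contrast_factor(170))       # ~4.85: a strong contrast boost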
680
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors a : Any = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """sequence-classification""" def __init__( self : List[str] , a_ : str ): """simple docstring""" if type(a_ ) == dict: __snake_case = Namespace(**a_ ) __snake_case = glue_output_modes[hparams.task] __snake_case = glue_tasks_num_labels[hparams.task] super().__init__(a_ , a_ , self.mode ) def A ( self : Union[str, Any] , **a_ : List[Any] ): """simple docstring""" return self.model(**a_ ) def A ( self : int , a_ : Optional[Any] , a_ : int ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __snake_case = self(**a_ ) __snake_case = outputs[0] __snake_case = self.trainer.lr_schedulers[0]["scheduler"] __snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def A ( self : List[str] ): """simple docstring""" __snake_case = self.hparams __snake_case = processors[args.task]() __snake_case = processor.get_labels() for mode in ["train", "dev"]: __snake_case = self._feature_file(a_ ) if os.path.exists(a_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , a_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) __snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) __snake_case = convert_examples_to_features( a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , a_ ) torch.save(a_ , a_ ) def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ): """simple docstring""" __snake_case = "dev" if mode == "test" else mode __snake_case = self._feature_file(a_ ) logger.info("Loading features from cached file %s" , a_ ) __snake_case = torch.load(a_ ) __snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , ) def A ( self : int , a_ : List[str] , a_ : Tuple ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", 
"albert"] else None __snake_case = self(**a_ ) __snake_case , __snake_case = outputs[:2] __snake_case = logits.detach().cpu().numpy() __snake_case = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A ( self : Dict , a_ : Optional[int] ): """simple docstring""" __snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() __snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __snake_case = np.argmax(a_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": __snake_case = np.squeeze(a_ ) __snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 ) __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )} __snake_case = dict(results.items() ) __snake_case = results return ret, preds_list, out_label_list def A ( self : Tuple , a_ : list ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A ( self : int , a_ : Tuple ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A ( a_ : str , a_ : Any ): """simple docstring""" BaseTransformer.add_model_specific_args(a_ , a_ ) parser.add_argument( "--max_seq_length" , default=128 , type=a_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = argparse.ArgumentParser() add_generic_args(_UpperCAmelCase , os.getcwd() ) __snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) __snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __snake_case = os.path.join( "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __snake_case = GLUETransformer(_UpperCAmelCase ) __snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) ) __snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCAmelCase ) if __name__ == "__main__": main()
680
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : str = logging.get_logger(__name__) a : str = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """donut-swin""" __SCREAMING_SNAKE_CASE = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Optional[Any] , a_ : List[Any]=224 , a_ : List[Any]=4 , a_ : int=3 , a_ : Dict=96 , a_ : Union[str, Any]=[2, 2, 6, 2] , a_ : List[str]=[3, 6, 12, 24] , a_ : Optional[Any]=7 , a_ : List[Any]=4.0 , a_ : Optional[Any]=True , a_ : Any=0.0 , a_ : Union[str, Any]=0.0 , a_ : Dict=0.1 , a_ : int="gelu" , a_ : Dict=False , a_ : int=0.02 , a_ : str=1e-5 , **a_ : str , ): """simple docstring""" super().__init__(**a_ ) __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = embed_dim __snake_case = depths __snake_case = len(a_ ) __snake_case = num_heads __snake_case = window_size __snake_case = mlp_ratio __snake_case = qkv_bias __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = drop_path_rate __snake_case = hidden_act __snake_case = use_absolute_embeddings __snake_case = layer_norm_eps __snake_case = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __snake_case = int(embed_dim * 2 ** (len(a_ ) - 1) )
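One detail worth calling out in the config row above: Swin-style backbones double the channel dimension at each stage, so the exposed `hidden_size` is derived rather than configured. With the default values read off the signature (embed_dim=96, depths=[2, 2, 6, 2], both assumptions taken from the row itself):

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # channel dimension after the last stage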
680
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def __UpperCAmelCase(_UpperCAmelCase: Optional[Any], _UpperCAmelCase: List[Any], _UpperCAmelCase: str) -> int:
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", _UpperCAmelCase)
    __snake_case = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        __snake_case = dataset_size < in_memory_max_size
    else:
        __snake_case = False
    __snake_case = is_small_dataset(_UpperCAmelCase)
    assert result == expected
680
1
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , a_ : Optional[Any] , a_ : List[Any]=100 , a_ : Any=13 , a_ : List[Any]=30 , a_ : Any=2 , a_ : List[Any]=3 , a_ : Tuple=True , a_ : Optional[Any]=True , a_ : Any=32 , a_ : Any=4 , a_ : str=4 , a_ : Tuple=37 , a_ : int="gelu" , a_ : str=0.1 , a_ : Union[str, Any]=0.1 , a_ : List[str]=10 , a_ : List[Any]=0.02 , a_ : Optional[int]=3 , a_ : List[str]=None , a_ : Optional[int]=[0, 1, 2, 3] , ): """simple docstring""" __snake_case = parent __snake_case = 100 __snake_case = batch_size __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = scope __snake_case = out_indices __snake_case = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __snake_case = (image_size // patch_size) ** 2 __snake_case = num_patches + 1 def A ( self : Tuple ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels, pixel_labels def A ( self : Any ): """simple docstring""" return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def A ( self : Any , a_ : List[str] , a_ : List[Any] , a_ : str , a_ : int ): """simple docstring""" __snake_case = BeitModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def A ( self : Optional[int] , a_ : Optional[int] , a_ : List[Any] , a_ : Optional[int] , a_ : Any ): """simple docstring""" __snake_case = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def A ( self : List[Any] , a_ : Any , a_ : Optional[int] , a_ : List[str] , a_ : Optional[Any] ): """simple docstring""" __snake_case = self.type_sequence_label_size __snake_case = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __snake_case = 1 __snake_case = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() __snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A ( self : List[Any] , a_ : Optional[Any] , a_ : str , a_ : Optional[int] , a_ : Tuple ): """simple docstring""" __snake_case = self.num_labels __snake_case = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = ( { """feature-extraction""": BeitModel, """image-classification""": BeitForImageClassification, """image-segmentation""": BeitForSemanticSegmentation, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : str ): """simple docstring""" __snake_case = BeitModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def A ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def A ( self : List[str] ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : List[str] ): """simple docstring""" pass def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def A ( self : str ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class 
in self.all_model_classes: __snake_case = model_class(a_ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) def A ( self : str ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def A ( self : List[str] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def A ( self : List[str] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def A ( self : Tuple ): """simple docstring""" if not self.model_tester.is_training: return __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue __snake_case = model_class(a_ ) model.to(a_ ) model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : str ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __snake_case = False __snake_case = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue __snake_case = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = _config_zero_init(a_ ) for model_class in self.all_model_classes: __snake_case = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def A ( self : str ): """simple docstring""" for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def __UpperCAmelCase ( ) -> Optional[Any]: __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : List[Any] ): """simple docstring""" return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def A ( self : Optional[int] ): """simple docstring""" __snake_case = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(a_ ) 
__snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ ) # prepare bool_masked_pos __snake_case = torch.ones((1, 196) , dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(pixel_values=a_ , bool_masked_pos=a_ ) __snake_case = outputs.logits # verify the logits __snake_case = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , a_ ) __snake_case = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a_ , atol=1e-2 ) ) @slow def A ( self : str ): """simple docstring""" __snake_case = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(a_ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.logits # verify the logits __snake_case = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , a_ ) __snake_case = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1e-4 ) ) __snake_case = 281 self.assertEqual(logits.argmax(-1 ).item() , a_ ) @slow def A ( self : List[str] ): """simple docstring""" __snake_case = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( a_ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.logits # verify the logits __snake_case = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , a_ ) __snake_case = torch.tensor([1.6881, -0.2787, 0.5901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1e-4 ) ) __snake_case = 2_396 self.assertEqual(logits.argmax(-1 ).item() , a_ ) @slow def A ( self : List[Any] ): """simple docstring""" __snake_case = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) __snake_case = model.to(a_ ) __snake_case = BeitImageProcessor(do_resize=a_ , size=640 , do_center_crop=a_ ) __snake_case = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) __snake_case = Image.open(ds[0]["file"] ) __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.logits # verify the logits __snake_case = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , a_ ) __snake_case = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: __snake_case = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=a_ , ) else: __snake_case = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=a_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-4 ) ) 
@slow def A ( self : Optional[Any] ): """simple docstring""" __snake_case = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) __snake_case = model.to(a_ ) __snake_case = BeitImageProcessor(do_resize=a_ , size=640 , do_center_crop=a_ ) __snake_case = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) __snake_case = Image.open(ds[0]["file"] ) __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.logits.detach().cpu() __snake_case = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(500, 300)] ) __snake_case = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , a_ ) __snake_case = image_processor.post_process_semantic_segmentation(outputs=a_ ) __snake_case = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , a_ )
680
'''simple docstring'''
def __UpperCAmelCase(_UpperCAmelCase: float) -> float:
    if edge <= 0 or not isinstance(_UpperCAmelCase, _UpperCAmelCase):
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def __UpperCAmelCase(_UpperCAmelCase: float) -> float:
    if edge <= 0 or not isinstance(_UpperCAmelCase, _UpperCAmelCase):
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
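These are the closed forms for a regular dodecahedron with edge length a: surface area A = 3 sqrt(25 + 10 sqrt(5)) a^2 and volume V = (15 + 7 sqrt(5)) / 4 a^3. A de-masked sketch with the unit-edge values checked numerically:

from math import isclose, sqrt


def dodecahedron_surface_area(edge: float) -> float:
    return 3 * sqrt(25 + 10 * sqrt(5)) * edge**2


def dodecahedron_volume(edge: float) -> float:
    return (15 + 7 * sqrt(5)) / 4 * edge**3


assert isclose(dodecahedron_surface_area(1), 20.645729, rel_tol=1e-6)
assert isclose(dodecahedron_volume(1), 7.663119, rel_tol=1e-6)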
680
1
'''simple docstring'''
import csv

import tweepy

# Twitter API credentials
a : List[str] = ''''''
a : int = ''''''
a : List[Any] = ''''''
a : Optional[int] = ''''''


def __UpperCAmelCase(_UpperCAmelCase: str) -> None:
    # authorize twitter, initialize tweepy
    __snake_case = tweepy.OAuthHandler(_UpperCAmelCase, _UpperCAmelCase)
    auth.set_access_token(_UpperCAmelCase, _UpperCAmelCase)
    __snake_case = tweepy.API(_UpperCAmelCase)

    # initialize a list to hold all the tweepy Tweets
    __snake_case = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    __snake_case = api.user_timeline(screen_name=_UpperCAmelCase, count=200)

    # save most recent tweets
    alltweets.extend(_UpperCAmelCase)

    # save the id of the oldest tweet less one
    __snake_case = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(_UpperCAmelCase) > 0:
        print(F'''getting tweets before {oldest}''')

        # all subsequent requests use the max_id param to prevent duplicates
        __snake_case = api.user_timeline(screen_name=_UpperCAmelCase, count=200, max_id=_UpperCAmelCase)

        # save most recent tweets
        alltweets.extend(_UpperCAmelCase)

        # update the id of the oldest tweet less one
        __snake_case = alltweets[-1].id - 1
        print(F'''...{len(_UpperCAmelCase)} tweets downloaded so far''')

    # transform the tweepy tweets into a 2D array that will populate the csv
    __snake_case = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(F'''new_{screen_name}_tweets.csv''', "w") as f:
        __snake_case = csv.writer(_UpperCAmelCase)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(_UpperCAmelCase)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets('''FirePing32''')
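The manual `max_id` bookkeeping above is the classic pagination pattern; tweepy also ships a `Cursor` helper that hides it. A hedged sketch of the equivalent loop (assuming a tweepy 3.x/4.x `API` object built as in the row above):

import tweepy


def get_all_tweets_cursor(api: tweepy.API, screen_name: str) -> list:
    # Cursor iterates across pages transparently instead of tracking max_id by hand.
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]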
680
'''simple docstring'''
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137


def __UpperCAmelCase(_UpperCAmelCase: float, _UpperCAmelCase: float, _UpperCAmelCase: float, _UpperCAmelCase: float) -> float:
    __snake_case = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase)))
    __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase)))

    # Compute the central angle between the two points using the haversine
    # theta: sigma = haversine_distance / equatorial radius
    __snake_case = haversine_distance(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    __snake_case = (b_lata + b_lata) / 2
    __snake_case = (b_lata - b_lata) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    __snake_case = (sin(_UpperCAmelCase) ** 2) * (cos(_UpperCAmelCase) ** 2)
    __snake_case = cos(sigma / 2) ** 2
    __snake_case = (sigma - sin(_UpperCAmelCase)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    __snake_case = (cos(_UpperCAmelCase) ** 2) * (sin(_UpperCAmelCase) ** 2)
    __snake_case = sin(sigma / 2) ** 2
    __snake_case = (sigma + sin(_UpperCAmelCase)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
1
'''simple docstring''' # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = None def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : List[str]="cosine" , ) -> Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(_UpperCAmelCase : List[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_UpperCAmelCase : Dict ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __snake_case = [] for i in range(_UpperCAmelCase ): __snake_case = i / num_diffusion_timesteps __snake_case = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) ) return torch.tensor(_UpperCAmelCase , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase ): __SCREAMING_SNAKE_CASE = 1 @register_to_config def __init__( self : int , a_ : int = 1_000 , a_ : float = 0.0001 , a_ : float = 0.02 , a_ : str = "linear" , a_ : Optional[Union[np.ndarray, List[float]]] = None , a_ : bool = True , a_ : bool = True , a_ : int = 0 , a_ : str = "epsilon" , a_ : float = 1.0 , **a_ : Any , ): """simple docstring""" if kwargs.get("set_alpha_to_one" , a_ ) is not None: __snake_case = ( "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead." ) deprecate("set_alpha_to_one" , "1.0.0" , a_ , standard_warn=a_ ) __snake_case = kwargs["set_alpha_to_one"] if trained_betas is not None: __snake_case = torch.tensor(a_ , dtype=torch.floataa ) elif beta_schedule == "linear": __snake_case = torch.linspace(a_ , a_ , a_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __snake_case = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , a_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __snake_case = betas_for_alpha_bar(a_ ) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' ) __snake_case = 1.0 - self.betas __snake_case = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
__snake_case = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution __snake_case = 1.0 # setable values __snake_case = None __snake_case = torch.from_numpy(np.arange(0 , a_ ).copy().astype(np.intaa ) ) def A ( self : str , a_ : torch.FloatTensor , a_ : Optional[int] = None ): """simple docstring""" return sample def A ( self : Optional[Any] , a_ : int , a_ : Union[str, torch.device] = None ): """simple docstring""" if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' f''' maximal {self.config.num_train_timesteps} timesteps.''' ) __snake_case = num_inference_steps __snake_case = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __snake_case = (np.arange(0 , a_ ) * step_ratio).round().copy().astype(np.intaa ) __snake_case = torch.from_numpy(a_ ).to(a_ ) self.timesteps += self.config.steps_offset def A ( self : str , a_ : torch.FloatTensor , a_ : int , a_ : torch.FloatTensor , a_ : float = 0.0 , a_ : bool = False , a_ : Optional[torch.FloatTensor] = None , a_ : bool = True , ): """simple docstring""" __snake_case = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process __snake_case = self.alphas_cumprod[timestep] __snake_case = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) __snake_case = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": __snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 __snake_case = model_output elif self.config.prediction_type == "sample": __snake_case = model_output __snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": __snake_case = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output __snake_case = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: __snake_case = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=a_ , pred_original_sample=a_ ) def __len__( self : Optional[Any] ): """simple docstring""" return self.config.num_train_timesteps
680
'''simple docstring'''
import math
import sys

import cva
import numpy as np


def __UpperCAmelCase(_UpperCAmelCase: np.ndarray, _UpperCAmelCase: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    __snake_case = math.sqrt(_UpperCAmelCase)
    __snake_case = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def __UpperCAmelCase(_UpperCAmelCase: np.ndarray, _UpperCAmelCase: int, _UpperCAmelCase: int, _UpperCAmelCase: int) -> np.ndarray:
    __snake_case = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def __UpperCAmelCase(_UpperCAmelCase: int, _UpperCAmelCase: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    __snake_case = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            __snake_case = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(_UpperCAmelCase, _UpperCAmelCase)


def __UpperCAmelCase(
    _UpperCAmelCase: np.ndarray,
    _UpperCAmelCase: float,
    _UpperCAmelCase: float,
    _UpperCAmelCase: int,
) -> np.ndarray:
    __snake_case = np.zeros(img.shape)
    __snake_case = get_gauss_kernel(_UpperCAmelCase, _UpperCAmelCase)
    __snake_case , __snake_case = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            __snake_case = get_slice(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
            __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
            __snake_case = vec_gaussian(_UpperCAmelCase, _UpperCAmelCase)
            __snake_case = np.multiply(_UpperCAmelCase, _UpperCAmelCase)
            __snake_case = np.multiply(_UpperCAmelCase, _UpperCAmelCase)
            __snake_case = np.sum(_UpperCAmelCase) / np.sum(_UpperCAmelCase)
            __snake_case = val
    return imga


def __UpperCAmelCase(_UpperCAmelCase: list) -> tuple:
    __snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
    __snake_case = float(args[2]) if args[2:] else 1.0
    __snake_case = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        __snake_case = int(args[4])
        __snake_case = kernel_size + abs(kernel_size % 2 - 1)
    else:
        __snake_case = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    a , a , a , a : Tuple = parse_args(sys.argv)
    a : Tuple = cva.imread(filename, 0)
    cva.imshow('''input image''', img)

    a : Dict = img / 255
    a : str = out.astype('''float32''')

    a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)

    a : Dict = out * 255
    a : List[str] = np.uinta(out)
    cva.imshow('''output image''', out)
    cva.waitKey(0)
    cva.destroyAllWindows()
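The spatial weight used by the bilateral filter above is a fixed Gaussian over pixel offsets, precomputed once per kernel size. A minimal sketch of that kernel construction (names assumed):

import numpy as np


def gaussian(x: np.ndarray, sigma: float) -> np.ndarray:
    # Elementwise 1-D Gaussian, applied here to a matrix of distances.
    return (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-0.5 * (x / sigma) ** 2)


def spatial_kernel(size: int, sigma: float) -> np.ndarray:
    # Gaussian of the Euclidean distance from the kernel center.
    half = size // 2
    yy, xx = np.mgrid[-half : half + 1, -half : half + 1]
    return gaussian(np.sqrt(xx**2 + yy**2), sigma)


print(spatial_kernel(5, 1.0).round(3))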
680
1
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , a_ : Optional[int] , a_ : Union[str, Any]=13 , a_ : Union[str, Any]=10 , a_ : List[Any]=3 , a_ : Any=2 , a_ : Optional[int]=2 , a_ : Tuple=2 , a_ : List[Any]=True , a_ : Tuple=True , a_ : Any=32 , a_ : Tuple=5 , a_ : str=4 , a_ : Optional[Any]=37 , a_ : Union[str, Any]="gelu" , a_ : str=0.1 , a_ : Any=0.1 , a_ : Optional[int]=10 , a_ : Any=0.02 , a_ : List[str]=0.9 , a_ : Any=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = patch_size __snake_case = tubelet_size __snake_case = num_frames __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = mask_ratio __snake_case = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame __snake_case = (image_size // patch_size) ** 2 __snake_case = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos __snake_case = int(mask_ratio * self.seq_length ) def A ( self : Any ): """simple docstring""" __snake_case = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = self.get_config() return config, pixel_values, labels def A ( self : List[Any] ): """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , ) def A ( self : List[str] , a_ : int , a_ : Any , a_ : Optional[int] ): """simple docstring""" __snake_case = VideoMAEModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def A ( self : Dict , a_ : List[Any] , a_ : Dict , a_ : Optional[Any] ): """simple docstring""" __snake_case = VideoMAEForPreTraining(a_ ) model.to(a_ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __snake_case = torch.ones((self.num_masks,) ) __snake_case = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) __snake_case = mask.expand(self.batch_size , -1 ).bool() __snake_case = model(a_ , a_ ) # model only returns predictions for masked patches __snake_case = mask.sum().item() __snake_case = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def A ( self : Dict ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : Dict ): """simple docstring""" __snake_case = VideoMAEModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def A ( self : List[str] , a_ : str , a_ : List[str] , a_ : str=False ): """simple docstring""" __snake_case = copy.deepcopy(a_ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __snake_case = torch.ones((self.model_tester.num_masks,) ) __snake_case = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) __snake_case = mask.expand(self.model_tester.batch_size , -1 ).bool() __snake_case = bool_masked_pos.to(a_ ) if return_labels: if model_class in [ *get_values(a_ ), ]: __snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def A ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds" ) def A ( self : List[Any] ): """simple docstring""" pass def A ( self : int ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def A ( self : List[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) def A ( self 
: Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a_ ) @slow def A ( self : str ): """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = VideoMAEModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def A ( self : Optional[Any] ): """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True for model_class in self.all_model_classes: __snake_case = self.model_tester.seq_length - self.model_tester.num_masks __snake_case = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) __snake_case = True __snake_case = False __snake_case = True __snake_case = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case = outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case = True __snake_case = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case = outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __snake_case = len(a_ ) # Check attention is always last and order is fine __snake_case = True __snake_case = True __snake_case = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a_ , a_ ) ) self.assertEqual(out_len + 1 , len(a_ ) ) __snake_case = outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def A ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(a_ : List[str] , a_ : Union[str, Any] , a_ : Optional[Any] ): __snake_case = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case = outputs.hidden_states __snake_case = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a_ ) , a_ ) __snake_case = self.model_tester.seq_length - self.model_tester.num_masks __snake_case = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a_ , a_ , a_ ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def A ( self : Optional[int] ): """simple docstring""" pass def __UpperCAmelCase ( ) -> Any: __snake_case = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) __snake_case = np.load(_UpperCAmelCase ) return list(_UpperCAmelCase ) @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : List[str] ): """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def A ( self : List[str] ): """simple docstring""" __snake_case = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to( a_ ) __snake_case = self.default_image_processor __snake_case = prepare_video() __snake_case = image_processor(a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) # verify the logits __snake_case = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , a_ ) __snake_case = torch.tensor([0.3669, -0.0688, -0.2421] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(a_ ) __snake_case = self.default_image_processor __snake_case = prepare_video() __snake_case = image_processor(a_ , return_tensors="pt" ).to(a_ ) # add boolean mask, indicating which patches to mask __snake_case = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) __snake_case = torch.load(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) # verify the logits __snake_case = torch.Size([1, 1_408, 1_536] ) __snake_case = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=a_ ) self.assertEqual(outputs.logits.shape , a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) __snake_case = torch.tensor([0.5142] , device=a_ ) self.assertTrue(torch.allclose(outputs.loss , a_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) __snake_case = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=a_ ).to( a_ ) with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = torch.tensor(torch.tensor([0.6469] ) , device=a_ ) self.assertTrue(torch.allclose(outputs.loss , a_ , atol=1e-4 ) )
680
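The pretraining row above hinges on one detail: every video in a batch must mask the same number of patches, so a single mask is built once and broadcast. A minimal sketch of that masking step in PyTorch; the concrete sizes below are my own illustrations, not values taken from the test:

```python
import torch

# Illustrative sizes (assumptions): 16 frames / tubelet 2 -> 8 temporal slots,
# 14x14 spatial patches -> seq_length = 8 * 196 = 1568; mask 75% of them.
batch_size, seq_length, num_masks = 2, 1568, 1176

# One shared mask: 1.0 marks a masked patch, 0.0 a visible one.
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()  # same mask for every video

assert bool_masked_pos.shape == (batch_size, seq_length)
assert bool_masked_pos.sum().item() == batch_size * num_masks
```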
'''simple docstring''' class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ): """simple docstring""" __snake_case = name __snake_case = value __snake_case = weight def __repr__( self : Optional[int] ): """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def A ( self : Any ): """simple docstring""" return self.value def A ( self : str ): """simple docstring""" return self.name def A ( self : int ): """simple docstring""" return self.weight def A ( self : Tuple ): """simple docstring""" return self.value / self.weight def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]: __snake_case = [] for i in range(len(_UpperCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int: __snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase ) __snake_case = [] __snake_case , __snake_case = 0.0, 0.0 for i in range(len(_UpperCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __UpperCAmelCase ( ) -> Optional[Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
680
1
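The knapsack row above is the classic greedy heuristic: sort items by value-to-weight density and take whatever still fits the budget. The same logic with descriptive names (a sketch; the menu data is invented):

```python
from typing import NamedTuple

class Thing(NamedTuple):
    name: str
    value: float
    weight: float

def greedy(items: list, max_cost: float) -> tuple:
    """Greedy 0/1 knapsack heuristic: take items in descending value/weight order."""
    result = []
    total_value = total_cost = 0.0
    for item in sorted(items, key=lambda t: t.value / t.weight, reverse=True):
        if total_cost + item.weight <= max_cost:
            result.append(item)
            total_cost += item.weight
            total_value += item.value
    return result, total_value

menu = [Thing("burger", 354, 6), Thing("salad", 354, 5), Thing("fries", 365, 4)]
print(greedy(menu, 10))  # picks fries then salad; the burger no longer fits
```

Note that the heuristic is not optimal for 0/1 knapsack in general; it only guarantees the best answer for the fractional variant.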
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> int: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __snake_case = TapasConfig.from_json_file(_UpperCAmelCase ) # set absolute/relative position embeddings parameter __snake_case = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __snake_case = TapasForQuestionAnswering(config=_UpperCAmelCase ) elif task == "WTQ": # run_task_main.py hparams __snake_case = 4 __snake_case = True # hparam_utils.py hparams __snake_case = 0.66_4694 __snake_case = 0.20_7951 __snake_case = 0.12_1194 __snake_case = True __snake_case = True __snake_case = False __snake_case = 0.035_2513 __snake_case = TapasForQuestionAnswering(config=_UpperCAmelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __snake_case = 4 __snake_case = False # hparam_utils.py hparams __snake_case = 36.4519 __snake_case = 0.90_3421 __snake_case = 222.088 __snake_case = True __snake_case = True __snake_case = True __snake_case = 0.76_3141 __snake_case = TapasForQuestionAnswering(config=_UpperCAmelCase ) elif task == "TABFACT": __snake_case = TapasForSequenceClassification(config=_UpperCAmelCase ) elif task == "MLM": __snake_case = TapasForMaskedLM(config=_UpperCAmelCase ) elif task == "INTERMEDIATE_PRETRAINING": __snake_case = TapasModel(config=_UpperCAmelCase ) else: raise ValueError(F'''Task {task} not supported.''' ) print(F'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model (weights and configuration) print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_UpperCAmelCase ) # Save tokenizer files print(F'''Save tokenizer files to {pytorch_dump_path}''' ) __snake_case = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=5_12 ) tokenizer.save_pretrained(_UpperCAmelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) a : int = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
680
'''simple docstring''' import os from math import log10 def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int: largest = 0.0 result = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , _UpperCAmelCase ) ) ): base , x = list(map(int , line.split("," ) ) ) if x * log10(base ) > largest: largest = x * log10(base ) result = i + 1 return result if __name__ == "__main__": print(__UpperCAmelCase())
680
1
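The Project Euler row above never evaluates base**exp, which can run to millions of digits. Since log10(b**e) = e * log10(b) and log10 is increasing, comparing e * log10(b) preserves the ordering. A self-contained sketch of that trick (the sample pairs are illustrative):

```python
from math import log10

def largest_power(pairs: list) -> int:
    """Return the 1-based line number of the pair (base, exp) maximising base**exp."""
    best, best_line = float("-inf"), 0
    for line_no, (base, exponent) in enumerate(pairs, start=1):
        score = exponent * log10(base)  # small float, same ordering as base**exponent
        if score > best:
            best, best_line = score, line_no
    return best_line

# 2**11 = 2048, 3**7 = 2187, and the third pair dwarfs both.
print(largest_power([(2, 11), (3, 7), (632382, 518061)]))  # -> 3
```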
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration a : Optional[Any] = pytest.mark.integration a : List[str] = {'''comet'''} a : Union[str, Any] = importlib.util.find_spec('''fairseq''') is not None a : List[Any] = {'''code_eval'''} a : str = os.name == '''nt''' a : Union[str, Any] = {'''bertscore''', '''frugalscore''', '''perplexity'''} a : Dict = importlib.util.find_spec('''transformers''') is not None def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> Dict: @wraps(_UpperCAmelCase ) def wrapper(self : Optional[int] , _UpperCAmelCase : Optional[int] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , _UpperCAmelCase ) return wrapper def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Dict: @wraps(_UpperCAmelCase ) def wrapper(self : List[str] , _UpperCAmelCase : str ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , _UpperCAmelCase ) return wrapper def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> str: @wraps(_UpperCAmelCase ) def wrapper(self : Any , _UpperCAmelCase : List[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , _UpperCAmelCase ) return wrapper def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) @local class SCREAMING_SNAKE_CASE__ ( parameterized.TestCase ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def A ( self : List[str] , a_ : str ): """simple docstring""" __snake_case = "[...]" __snake_case = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , a_ ) ).module_path ) __snake_case = datasets.load.import_main_class(metric_module.__name__ , dataset=a_ ) # check parameters __snake_case = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(a_ , metric_module.__name__ ): with self.use_local_metrics(): try: __snake_case = doctest.testmod(a_ , verbose=a_ , raise_on_error=a_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def A ( self : Union[str, Any] , a_ : Tuple ): """simple docstring""" __snake_case = "[...]" __snake_case = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , a_ ) ).module_path ) # run doctest with self.use_local_metrics(): __snake_case = doctest.testmod(a_ , verbose=a_ , raise_on_error=a_ ) self.assertEqual(results.failed , 0 ) 
self.assertGreater(results.attempted , 1 ) @contextmanager def A ( self : Any , a_ : Optional[Any] , a_ : List[Any] ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](a_ ): yield else: yield @contextmanager def A ( self : Union[str, Any] ): """simple docstring""" def load_local_metric(a_ : Dict , *a_ : List[Any] , **a_ : List[Any] ): return load_metric(os.path.join("metrics" , a_ ) , *a_ , **a_ ) with patch("datasets.load_metric" ) as mock_load_metric: __snake_case = load_local_metric yield @classmethod def A ( cls : List[str] , a_ : Optional[int] ): """simple docstring""" def wrapper(a_ : Optional[Any] ): __snake_case = contextmanager(a_ ) __snake_case = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> str: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Optional[Any] , a_ : Any ): """simple docstring""" assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: __snake_case = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> str: import torch def bert_cos_score_idf(_UpperCAmelCase : Any , _UpperCAmelCase : Any , *_UpperCAmelCase : str , **_UpperCAmelCase : Dict ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCAmelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: __snake_case = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Any: def load_from_checkpoint(_UpperCAmelCase : Tuple ): class SCREAMING_SNAKE_CASE__ : def A ( self : Dict , a_ : Any , *a_ : Dict , **a_ : List[str] ): """simple docstring""" assert len(a_ ) == 2 __snake_case = [0.19, 0.92] return scores, sum(a_ ) / len(a_ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: __snake_case = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: __snake_case = load_from_checkpoint yield def __UpperCAmelCase ( ) -> Any: __snake_case = load_metric(os.path.join("metrics" , "seqeval" ) ) __snake_case = "ERROR" __snake_case = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ): metric.compute(predictions=[] , references=[] , scheme=_UpperCAmelCase )
680
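The metric-test row registers a monkeypatching contextmanager per metric through a class-level decorator, so expensive model downloads and forward passes are stubbed out during doctests. A stripped-down sketch of that registry pattern; the names and the patched target are mine, chosen so the sketch runs stand-alone:

```python
from contextlib import contextmanager
from unittest.mock import patch

class MetricTest:
    PATCHERS = {}  # metric name -> contextmanager factory

    @classmethod
    def register_patcher(cls, name):
        def wrapper(func):
            cls.PATCHERS[name] = contextmanager(func)
            return func
        return wrapper

    def run(self, name):
        ctx = self.PATCHERS.get(name)
        if ctx is not None:
            with ctx():  # heavy calls stay mocked for the duration of the test
                print(f"running {name} with expensive calls patched")
        else:
            print(f"running {name} unpatched")

@MetricTest.register_patcher("bertscore")
def _patch_bertscore():
    # Stand-in target so the sketch runs anywhere; a real patcher would
    # target something like bert_score.scorer.get_model instead.
    with patch("os.cpu_count", return_value=1):
        yield

MetricTest().run("bertscore")
```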
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_logger(__name__) a : Dict = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a : Any = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } a : Optional[int] = { '''facebook/blenderbot_small-90M''': 512, } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ): """simple docstring""" super().__init__( ByteLevelBPETokenizer( vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , ) __snake_case = add_prefix_space def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ): """simple docstring""" __snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
680
1
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = LayoutLMTokenizer __SCREAMING_SNAKE_CASE = LayoutLMTokenizerFast __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True def A ( self : Dict ): """simple docstring""" super().setUp() __snake_case = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def A ( self : Optional[Any] , **a_ : Union[str, Any] ): """simple docstring""" return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **a_ ) def A ( self : Any , a_ : Any ): """simple docstring""" __snake_case = "UNwant\u00E9d,running" __snake_case = "unwanted, running" return input_text, output_text def A ( self : List[Any] ): """simple docstring""" __snake_case = self.tokenizer_class(self.vocab_file ) __snake_case = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(a_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] ) def A ( self : int ): """simple docstring""" pass
680
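The toy vocab in the LayoutLM test exercises BERT-style WordPiece segmentation: greedy longest-match from the left, with `##` marking continuation pieces. A compact reimplementation of that matching rule, applied after the tokenizer's lowercasing and accent stripping, using pieces from the test's vocab:

```python
def wordpiece(word: str, vocab: set) -> list:
    """Greedy longest-match-first WordPiece, as BERT-style tokenizers apply per word."""
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub  # continuation pieces carry the '##' prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1  # shrink the candidate until it is in the vocab
        if cur is None:
            return ["[UNK]"]  # no prefix matched at all
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "want", "runn", "##ing", ","}
print(wordpiece("unwanted", vocab))  # -> ['un', '##want', '##ed'], as the test expects
```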
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : str = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
680
1
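The GPTBigCode row wires up transformers' `_LazyModule`, which defers importing heavy submodules until an attribute is first touched. The same effect can be sketched with PEP 562's module-level `__getattr__`; this is an illustrative `__init__.py` for a hypothetical package, not transformers' actual implementation:

```python
# __init__.py of a hypothetical package using lazy imports (PEP 562).
import importlib

# Public attribute -> relative submodule that defines it (illustrative mapping).
_import_structure = {
    "GPTBigCodeConfig": ".configuration_gpt_bigcode",
    "GPTBigCodeModel": ".modeling_gpt_bigcode",
}

def __getattr__(name):
    # Invoked only when normal lookup fails, so the costly submodule
    # (and its torch import) loads on first access, not at package import.
    if name in _import_structure:
        module = importlib.import_module(_import_structure[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return sorted(_import_structure)
```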
'''simple docstring''' import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> str: # Initialise PyTorch model __snake_case = TaConfig.from_json_file(_UpperCAmelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) __snake_case = TaForConditionalGeneration(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) a : str = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
680
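Both conversion rows (TAPAS earlier and T5 here) follow the same recipe: build a config from JSON, instantiate a randomly initialised PyTorch model, copy the TensorFlow variables into it, then serialise. Schematically, using the T5 names this script's unobfuscated original imports:

```python
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5

def convert(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str) -> None:
    config = T5Config.from_json_file(config_file)              # 1. architecture from JSON
    model = T5ForConditionalGeneration(config)                 # 2. fresh PyTorch weights
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)   # 3. copy TF variables in
    model.save_pretrained(pytorch_dump_path)                   # 4. write weights + config
```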
'''simple docstring''' # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers a : Optional[Any] = float('''nan''') class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , a_ : Optional[int] ): """simple docstring""" __snake_case = sys.stdout __snake_case = open(a_ , "a" ) def __getattr__( self : str , a_ : List[Any] ): """simple docstring""" return getattr(self.stdout , a_ ) def A ( self : Union[str, Any] , a_ : List[Any] ): """simple docstring""" self.stdout.write(a_ ) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) ) def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]: __snake_case = [] # deal with critical env vars __snake_case = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: __snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) __snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(_UpperCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __snake_case = [] __snake_case = "" while len(_UpperCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCAmelCase ) __snake_case = "" return "\\\n".join(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple: # unwrap multi-line input __snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own __snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir __snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , ) __snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams __snake_case = variation.replace(" " , "-" ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f: f.write(result.stdout ) with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f: __snake_case = json.load(_UpperCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict: __snake_case = [] __snake_case = [] __snake_case = F'''{id}: {variation:<{longest_variation_len}}''' __snake_case = F'''{preamble}: ''' __snake_case = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ): __snake_case = process_run_single( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = single_run_metrics[target_metric_key] if not math.isnan(_UpperCAmelCase ): metrics.append(_UpperCAmelCase ) results.append(_UpperCAmelCase ) outcome += "✓" else: outcome += "✘" __snake_case = F'''\33[2K\r{outcome}''' if len(_UpperCAmelCase ) > 0: __snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __snake_case = round(mean_metrics[target_metric_key] , 2 ) __snake_case = F'''{outcome} {mean_target}''' if len(_UpperCAmelCase ) > 1: results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}''' print(_UpperCAmelCase ) __snake_case = variation return mean_metrics else: print(_UpperCAmelCase ) return {variation_key: variation, target_metric_key: nan} def __UpperCAmelCase ( ) -> Optional[int]: __snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) ) return F''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]: __snake_case = pd.DataFrame(_UpperCAmelCase ) __snake_case = "variation" __snake_case = "diff_%" __snake_case = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCAmelCase 
): # as a fallback, use the minimal value as the sentinel __snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCAmelCase ): __snake_case = df.apply( lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns __snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys] __snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols # capitalize __snake_case = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" ) __snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" ) __snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )] print("\n\n".join(_UpperCAmelCase ) ) def __UpperCAmelCase ( ) -> Dict: __snake_case = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , ) parser.add_argument( "--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) __snake_case = parser.parse_args() __snake_case = args.output_dir Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) __snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase ) # split each dimension into its --foo variations __snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) ) __snake_case = max(len(_UpperCAmelCase ) for x in variations ) # split wanted keys __snake_case = args.report_metric_keys.split() # capture prints into a log file for convenience __snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) __snake_case = Tee(_UpperCAmelCase ) print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' ) print(F'''Base command: {" ".join(_UpperCAmelCase )}''' ) __snake_case = "variation" __snake_case = [] for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ): __snake_case = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) ) process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase ) if __name__ == "__main__": main()
680
1
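The benchmark row builds its run matrix by splitting each `--variations` dimension on `|` and taking the cartesian product, exactly as its docstring describes. The expansion step in isolation:

```python
import itertools

# Two dimensions from the tool's own example; an empty option means "defaults".
variations = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]

dims = [[opt.strip() for opt in dim.split("|")] for dim in variations]
combos = [" ".join(parts).strip() for parts in itertools.product(*dims)]
for suffix in combos:
    print(suffix or "(defaults)")
# -> --tf32 0 / --tf32 0 --fp16 / --tf32 0 --bf16 / --tf32 1 / --tf32 1 --fp16 / --tf32 1 --bf16
```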
'''simple docstring''' from string import ascii_uppercase a : List[str] = {char: i for i, char in enumerate(ascii_uppercase)} a : str = dict(enumerate(ascii_uppercase)) def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str: __snake_case = len(_UpperCAmelCase ) __snake_case = 0 while True: if x == i: __snake_case = 0 if len(_UpperCAmelCase ) == len(_UpperCAmelCase ): break key += key[i] i += 1 return key def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str: __snake_case = "" __snake_case = 0 for letter in message: if letter == " ": cipher_text += " " else: __snake_case = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str: __snake_case = "" __snake_case = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: __snake_case = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def __UpperCAmelCase ( ) -> None: __snake_case = "THE GERMAN ATTACK" __snake_case = "SECRET" __snake_case = generate_key(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = cipher_text(_UpperCAmelCase , _UpperCAmelCase ) print(F'''Encrypted Text = {s}''' ) print(F'''Original Text = {original_text(_UpperCAmelCase , _UpperCAmelCase )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
680
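The cipher row is a repeating-key (Vigenère-style) scheme with an unusual sign convention: encryption subtracts the key letter modulo 26 and decryption adds it back. A compact sketch of the same arithmetic; the helper names are mine:

```python
from string import ascii_uppercase

A2I = {c: i for i, c in enumerate(ascii_uppercase)}
I2A = dict(enumerate(ascii_uppercase))

def extend_key(message: str, key: str) -> str:
    # Repeat the key until it covers every letter (spaces consume no key).
    letters = sum(c != " " for c in message)
    return (key * (letters // len(key) + 1))[:letters]

def shift(text: str, key: str, sign: int) -> str:
    out, k = [], 0
    for ch in text:
        if ch == " ":
            out.append(" ")
        else:
            out.append(I2A[(A2I[ch] + sign * A2I[key[k]]) % 26])
            k += 1
    return "".join(out)

key = extend_key("THE GERMAN ATTACK", "SECRET")
encrypted = shift("THE GERMAN ATTACK", key, -1)    # subtract key, as in the row above
print(encrypted, "|", shift(encrypted, key, +1))   # adding it back restores the text
```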
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( ) -> Dict: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case = [1, 2, 3] with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 ) with pytest.raises(_UpperCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]: __snake_case = [1, 2] __snake_case = {"a": 1, "b": 2} __snake_case = {"a": [1, 2], "b": [3, 4]} __snake_case = {"a": {"1": 1}, "b": 2} __snake_case = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case = [2, 3] __snake_case = {"a": 2, "b": 3} __snake_case = {"a": [2, 3], "b": [4, 5]} __snake_case = {"a": {"1": 2}, "b": 3} __snake_case = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
680
1
'''simple docstring''' import argparse import copy def __UpperCAmelCase ( _UpperCAmelCase : str ) -> List[Any]: __snake_case = {} with open(_UpperCAmelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __snake_case = [] _list.append([line.split()[1], line.split()[2]] ) __snake_case = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __snake_case = [] _list.append([line.split()[0], line.split()[2]] ) __snake_case = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ) -> List[str]: with open(_UpperCAmelCase ) as f: __snake_case = f.read(1 ) __snake_case = start_node __snake_case = [] __snake_case = start_node __snake_case = 0 while visiting not in first_solution: __snake_case = 1_00_00 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_UpperCAmelCase ) and k[0] not in first_solution: __snake_case = k[1] __snake_case = k[0] first_solution.append(_UpperCAmelCase ) __snake_case = distance_of_first_solution + int(_UpperCAmelCase ) __snake_case = best_node first_solution.append(_UpperCAmelCase ) __snake_case = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __snake_case = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_00_00 ) return first_solution, distance_of_first_solution def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ) -> Dict: __snake_case = [] for n in solution[1:-1]: __snake_case = solution.index(_UpperCAmelCase ) for kn in solution[1:-1]: __snake_case = solution.index(_UpperCAmelCase ) if n == kn: continue __snake_case = copy.deepcopy(_UpperCAmelCase ) __snake_case = kn __snake_case = n __snake_case = 0 for k in _tmp[:-1]: __snake_case = _tmp[_tmp.index(_UpperCAmelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __snake_case = distance + int(i[1] ) _tmp.append(_UpperCAmelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __snake_case = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _UpperCAmelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict ) -> Optional[int]: __snake_case = 1 __snake_case = first_solution __snake_case = [] __snake_case = distance_of_first_solution __snake_case = solution while count <= iters: __snake_case = find_neighborhood(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = 0 __snake_case = neighborhood[index_of_best_solution] __snake_case = len(_UpperCAmelCase ) - 1 __snake_case = False while not found: __snake_case = 0 while i < len(_UpperCAmelCase ): if best_solution[i] != solution[i]: __snake_case = best_solution[i] __snake_case = solution[i] break __snake_case = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __snake_case = True __snake_case = best_solution[:-1] __snake_case = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __snake_case = cost __snake_case = solution else: __snake_case = index_of_best_solution + 1 __snake_case = 
neighborhood[index_of_best_solution] if len(_UpperCAmelCase ) >= size: tabu_list.pop(0 ) __snake_case = count + 1 return best_solution_ever, best_cost def __UpperCAmelCase ( _UpperCAmelCase : List[str]=None ) -> Tuple: __snake_case = generate_neighbours(args.File ) __snake_case , __snake_case = generate_first_solution( args.File , _UpperCAmelCase ) __snake_case , __snake_case = tabu_search( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": a : Any = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
680
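The tabu search rows implement the standard control loop: explore a swap neighborhood, always move to the best non-tabu neighbor (even if it is worse), and keep a fixed-size queue of forbidden moves so the search cannot immediately backtrack. A toy version of that loop on a distance matrix; the problem data and the simplified move encoding are my own:

```python
import itertools

def tour_length(tour, dist):
    return sum(dist[a][b] for a, b in zip(tour, tour[1:]))

def tabu_search(tour, dist, iters=50, tabu_size=5):
    best, best_len = tour, tour_length(tour, dist)
    current, tabu = tour, []
    for _ in range(iters):
        # All neighbors reached by swapping two interior cities, minus tabu moves.
        candidates = []
        for i, j in itertools.combinations(range(1, len(current) - 1), 2):
            neighbor = current[:]
            neighbor[i], neighbor[j] = neighbor[j], neighbor[i]
            move = (current[i], current[j])
            if move not in tabu:
                candidates.append((tour_length(neighbor, dist), neighbor, move))
        if not candidates:
            break
        length, current, move = min(candidates)  # best neighbor, even if worse
        tabu.append(move)
        if len(tabu) > tabu_size:
            tabu.pop(0)  # the oldest forbidden move becomes legal again
        if length < best_len:
            best, best_len = current, length
    return best, best_len

dist = [[0, 2, 9, 10], [2, 0, 6, 4], [9, 6, 0, 3], [10, 4, 3, 0]]
print(tabu_search([0, 1, 2, 3, 0], dist))  # -> ([0, 1, 3, 2, 0], 18)
```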
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : int = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """mobilenet_v2""" def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ): """simple docstring""" super().__init__(**a_ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) __snake_case = num_channels __snake_case = image_size __snake_case = depth_multiplier __snake_case = depth_divisible_by __snake_case = min_depth __snake_case = expand_ratio __snake_case = output_stride __snake_case = first_layer_is_expansion __snake_case = finegrained_output __snake_case = hidden_act __snake_case = tf_padding __snake_case = classifier_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = version.parse("""1.11""" ) @property def A ( self : Optional[int] ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def A ( self : Optional[int] ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def A ( self : int ): """simple docstring""" return 1e-4
680
1
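`depth_multiplier` and `depth_divisible_by` in the MobileNetV2 config interact through the channel-rounding rule used across the MobileNet family: scale the width, then round to the nearest multiple of the divisor without ever dropping below 90% of the scaled value. A hedged sketch of that helper (this mirrors the widely used `make_divisible` recipe, not necessarily transformers' exact code):

```python
from typing import Optional

def make_divisible(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    """Round `value` to the nearest multiple of `divisor`, staying within 90% of it."""
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never shrink a layer by more than 10%
        new_value += divisor
    return new_value

# Scaling a 32-channel stem by depth_multiplier=1.4 with depth_divisible_by=8:
print(make_divisible(32 * 1.4))  # -> 48
```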
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def A ( self : Tuple ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def A ( self : int ): """simple docstring""" __snake_case = self.dummy_uncond_unet __snake_case = PNDMScheduler() __snake_case = PNDMPipeline(unet=a_ , scheduler=a_ ) pndm.to(a_ ) pndm.set_progress_bar_config(disable=a_ ) __snake_case = torch.manual_seed(0 ) __snake_case = pndm(generator=a_ , num_inference_steps=20 , output_type="numpy" ).images __snake_case = torch.manual_seed(0 ) __snake_case = pndm(generator=a_ , num_inference_steps=20 , output_type="numpy" , return_dict=a_ )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __snake_case = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Optional[Any] ): """simple docstring""" __snake_case = "google/ddpm-cifar10-32" __snake_case = UNetaDModel.from_pretrained(a_ ) __snake_case = PNDMScheduler() __snake_case = PNDMPipeline(unet=a_ , scheduler=a_ ) pndm.to(a_ ) pndm.set_progress_bar_config(disable=a_ ) __snake_case = torch.manual_seed(0 ) __snake_case = pndm(generator=a_ , output_type="numpy" ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __snake_case = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
680
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """data2vec-text""" def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @property def A ( self : Any ): """simple docstring""" if self.task == "multiple-choice": __snake_case = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
680
1
'''simple docstring''' import os import sys import unittest a : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) a : int = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') a : str = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = get_test_to_tester_mapping(a_ ) __snake_case = get_test_to_tester_mapping(a_ ) __snake_case = {"BertModelTest": "BertModelTester"} __snake_case = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(a_ ) , a_ ) self.assertEqual(get_test_info.to_json(a_ ) , a_ ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case = get_model_to_test_mapping(a_ ) __snake_case = get_model_to_test_mapping(a_ ) __snake_case = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } __snake_case = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(a_ ) , a_ ) self.assertEqual(get_test_info.to_json(a_ ) , a_ ) def A ( self : Dict ): """simple docstring""" __snake_case = get_model_to_tester_mapping(a_ ) __snake_case = get_model_to_tester_mapping(a_ ) __snake_case = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } __snake_case = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(a_ ) , a_ ) self.assertEqual(get_test_info.to_json(a_ ) , a_ )
680
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a : Tuple = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ): """simple docstring""" __snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] ) __snake_case = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , a_ : int ): """simple docstring""" super().__init__(a_ ) __snake_case = BertEncoderWithPabee(a_ ) self.init_weights() __snake_case = 0 __snake_case = 0 __snake_case = 0 __snake_case = 0 def A ( self : Optional[int] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = threshold def A ( self : Optional[Any] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = patience def A ( self : Any ): """simple docstring""" __snake_case = 0 __snake_case = 0 def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.inference_layers_num / self.inference_instances_num __snake_case = ( f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(a_ ) @add_start_docstrings_to_model_forward(a_ ) def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __snake_case = input_ids.size() elif inputs_embeds is not None: __snake_case = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) if token_type_ids is None: __snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __snake_case , __snake_case , __snake_case = encoder_hidden_states.size() __snake_case = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) __snake_case = self.invert_attention_mask(a_ ) else: __snake_case = None # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers ) __snake_case = self.embeddings( input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ ) __snake_case = embedding_output if self.training: __snake_case = [] for i in range(self.config.num_hidden_layers ): __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](output_dropout(a_ ) ) res.append(a_ ) elif self.patience == 0: # Use all layers for inference __snake_case = self.encoder( a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case = self.pooler(encoder_outputs[0] ) __snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )] else: __snake_case = 0 __snake_case = None __snake_case = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](a_ ) if regression: __snake_case = logits.detach() if patient_result is not None: __snake_case = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __snake_case = 0 else: __snake_case = logits.detach().argmax(dim=1 ) if patient_result is not None: __snake_case = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(a_ ) ): patient_counter += 1 else: __snake_case = 0 __snake_case = logits if patient_counter == self.patience: break __snake_case = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : List[str] , a_ : Tuple ): """simple docstring""" super().__init__(a_ ) __snake_case = config.num_labels __snake_case = BertModelWithPabee(a_ ) __snake_case = nn.Dropout(config.hidden_dropout_prob ) __snake_case = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(a_ ) def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ): """simple docstring""" __snake_case = self.bert( input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __snake_case = (logits[-1],) if labels is not None: __snake_case = None __snake_case = 0 for ix, logits_item in enumerate(a_ ): if self.num_labels == 1: # We are doing regression __snake_case = MSELoss() __snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __snake_case = CrossEntropyLoss() __snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __snake_case = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __snake_case = (total_loss / total_weights,) + outputs return outputs
680
1
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import _LazyModule a : Any = {'''tokenization_tapex''': ['''TapexTokenizer''']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
680
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = backbone_out_indices __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = initializer_range __snake_case = num_labels __snake_case = backbone_featmap_shape __snake_case = scope __snake_case = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __snake_case = (image_size // patch_size) ** 2 __snake_case = num_patches + 1 def A ( self : int ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def A ( self : Optional[Any] ): """simple docstring""" __snake_case = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ): """simple docstring""" __snake_case = DPTModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForDepthEstimation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ): """simple docstring""" __snake_case = self.num_labels __snake_case = DPTForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def A ( self : List[Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () __SCREAMING_SNAKE_CASE = ( { """depth-estimation""": DPTForDepthEstimation, """feature-extraction""": DPTModel, """image-segmentation""": DPTForSemanticSegmentation, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def A ( self : Optional[Any] ): """simple docstring""" __snake_case = DPTModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def A ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds" ) def A ( self : Any ): """simple docstring""" pass def A ( self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def A ( self : List[str] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a_ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) def A ( self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*a_ ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def A ( self : Optional[int] ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True if model_class in get_values(a_ ): continue __snake_case = model_class(a_ ) model.to(a_ ) model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : int ): """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = False __snake_case = True if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue __snake_case = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() __snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ ) __snake_case = model(**a_ ).loss loss.backward() def A ( self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = _config_zero_init(a_ ) for model_class in self.all_model_classes: __snake_case = model_class(config=a_ ) # Skip the check for the backbone __snake_case = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def A ( self : Tuple ): """simple docstring""" pass @slow def A ( self : int ): """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __snake_case = DPTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def A ( self : int ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = "add" with self.assertRaises(a_ ): __snake_case = DPTForDepthEstimation(a_ ) def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Dict ): """simple docstring""" __snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" ) __snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ ) __snake_case = prepare_img() __snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ ) # forward pass with torch.no_grad(): __snake_case = model(**a_ ) __snake_case = outputs.predicted_depth # verify the predicted depth __snake_case = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , a_ ) __snake_case = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
680
1
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : List[str] , a_ : Optional[Any] , a_ : Optional[int]=13 , a_ : Optional[Any]=7 , a_ : Any=True , a_ : str=True , a_ : int=True , a_ : List[str]=True , a_ : Dict=True , a_ : Any=False , a_ : Any=False , a_ : Optional[Any]=False , a_ : Optional[int]=2 , a_ : List[str]=99 , a_ : List[Any]=0 , a_ : Tuple=32 , a_ : str=5 , a_ : Dict=4 , a_ : Tuple=0.1 , a_ : Any=0.1 , a_ : Dict=512 , a_ : Optional[Any]=12 , a_ : Dict=2 , a_ : Tuple=0.02 , a_ : Tuple=3 , a_ : List[str]=4 , a_ : Tuple="last" , a_ : Optional[int]=None , a_ : Optional[Any]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_lengths __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = gelu_activation __snake_case = sinusoidal_embeddings __snake_case = causal __snake_case = asm __snake_case = n_langs __snake_case = vocab_size __snake_case = n_special __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = summary_type __snake_case = use_proj __snake_case = scope def A ( self : List[Any] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case = None if self.use_input_lengths: __snake_case = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = ids_tensor([self.batch_size] , 2 ).float() __snake_case = ids_tensor([self.batch_size] , self.num_choices ) __snake_case = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def A ( self : Optional[int] ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def A ( self : Union[str, Any] , a_ : Union[str, Any] , a_ : Dict , a_ : str , a_ : str , a_ : Optional[int] , a_ : Dict , a_ : str , a_ : Optional[int] , a_ : Dict , ): """simple docstring""" __snake_case = FlaubertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , lengths=a_ , langs=a_ ) __snake_case = model(a_ , langs=a_ ) __snake_case = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Tuple , a_ : int , a_ : str , a_ : str , a_ : Tuple , a_ : str , a_ : Optional[Any] , a_ : str , a_ : Dict , a_ : Optional[int] , ): """simple docstring""" __snake_case = FlaubertWithLMHeadModel(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Union[str, Any] , a_ : int , a_ : int , a_ : str , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Any , a_ : Tuple , a_ : Any , a_ : Tuple , ): """simple docstring""" __snake_case = FlaubertForQuestionAnsweringSimple(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) __snake_case = model(a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Tuple , a_ : Any , a_ : Union[str, Any] , a_ : int , a_ : Any , a_ : Union[str, Any] , a_ : str , a_ : List[Any] , a_ : Optional[int] , a_ : Union[str, Any] , ): """simple docstring""" __snake_case = FlaubertForQuestionAnswering(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) __snake_case = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , ) __snake_case = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , ) ((__snake_case) , ) = result_with_labels.to_tuple() __snake_case = model(a_ , start_positions=a_ , end_positions=a_ ) ((__snake_case) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def A ( self : int , a_ : Any , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : List[str] , a_ : Optional[Any] , a_ : Dict , a_ : Any , a_ : List[str] , ): """simple docstring""" __snake_case = FlaubertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ ) __snake_case = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 
self.type_sequence_label_size) ) def A ( self : List[Any] , a_ : Optional[int] , a_ : int , a_ : int , a_ : List[str] , a_ : Dict , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Optional[Any] , ): """simple docstring""" __snake_case = self.num_labels __snake_case = FlaubertForTokenClassification(a_ ) model.to(a_ ) model.eval() __snake_case = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Optional[Any] , a_ : List[Any] , a_ : Dict , a_ : int , a_ : Dict , a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict , a_ : List[Any] , ): """simple docstring""" __snake_case = self.num_choices __snake_case = FlaubertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def A ( self : List[Any] , a_ : Tuple , a_ : Optional[int] , a_ : str , a_ : int , a_ : Any ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def A ( self : List[str] , a_ : List[str] , a_ : List[Any] , a_ : Any=False ): """simple docstring""" __snake_case = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) __snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def A ( self : List[str] ): """simple docstring""" __snake_case = FlaubertModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , emb_dim=37 ) def A ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*a_ ) def A ( self : str ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*a_ ) def A ( self : str ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*a_ ) def A ( self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*a_ ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*a_ ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*a_ ) @slow def A ( self : Optional[Any] ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = FlaubertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def A ( self : Tuple ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return __snake_case = True __snake_case = model_class(config=a_ ) __snake_case = self._prepare_for_class(a_ , a_ ) __snake_case = torch.jit.trace( a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) ) __snake_case = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ ) loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def A ( self : Any ): """simple docstring""" __snake_case = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) __snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): __snake_case = model(a_ )[0] __snake_case = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a_ ) __snake_case = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
680
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def A ( self : Any ): """simple docstring""" return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
680
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Any = logging.get_logger(__name__) a : int = { '''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''', '''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """xlm-roberta-xl""" def __init__( self : List[str] , a_ : Optional[Any]=250_880 , a_ : Any=2_560 , a_ : Dict=36 , a_ : Union[str, Any]=32 , a_ : List[str]=10_240 , a_ : Tuple="gelu" , a_ : Optional[int]=0.1 , a_ : str=0.1 , a_ : str=514 , a_ : Union[str, Any]=1 , a_ : str=0.02 , a_ : List[str]=1e-05 , a_ : List[Any]=1 , a_ : Any=0 , a_ : Union[str, Any]=2 , a_ : str="absolute" , a_ : int=True , a_ : Any=None , **a_ : Any , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @property def A ( self : Optional[int] ): """simple docstring""" if self.task == "multiple-choice": __snake_case = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
680
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device a : Optional[Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a_ ) __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = generator.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def A ( self : Optional[int] ): """simple docstring""" __snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) __snake_case = "A painting of a squirrel eating a burger " __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images __snake_case = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) __snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
680
1
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva a : Any = '''''' a : List[str] = '''''' a : List[Any] = '''''' a : List[Any] = 1 # (0 is vertical, 1 is horizontal) def __UpperCAmelCase ( ) -> None: __snake_case , __snake_case = get_dataset(_UpperCAmelCase , _UpperCAmelCase ) print("Processing..." ) __snake_case , __snake_case , __snake_case = update_image_and_anno(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for index, image in enumerate(_UpperCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __snake_case = random_chars(32 ) __snake_case = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] __snake_case = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(F'''/{file_root}.jpg''' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Success {index+1}/{len(_UpperCAmelCase )} with {file_name}''' ) __snake_case = [] for anno in new_annos[index]: __snake_case = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(_UpperCAmelCase ) with open(F'''/{file_root}.txt''' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> tuple[list, list]: __snake_case = [] __snake_case = [] for label_file in glob.glob(os.path.join(_UpperCAmelCase , "*.txt" ) ): __snake_case = label_file.split(os.sep )[-1].rsplit("." , 1 )[0] with open(_UpperCAmelCase ) as in_file: __snake_case = in_file.readlines() __snake_case = os.path.join(_UpperCAmelCase , F'''{label_name}.jpg''' ) __snake_case = [] for obj_list in obj_lists: __snake_case = obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_UpperCAmelCase ) labels.append(_UpperCAmelCase ) return img_paths, labels def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int = 1 ) -> tuple[list, list, list]: __snake_case = [] __snake_case = [] __snake_case = [] for idx in range(len(_UpperCAmelCase ) ): __snake_case = [] __snake_case = img_list[idx] path_list.append(_UpperCAmelCase ) __snake_case = anno_list[idx] __snake_case = cva.imread(_UpperCAmelCase ) if flip_type == 1: __snake_case = cva.flip(_UpperCAmelCase , _UpperCAmelCase ) for bbox in img_annos: __snake_case = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __snake_case = cva.flip(_UpperCAmelCase , _UpperCAmelCase ) for bbox in img_annos: __snake_case = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_UpperCAmelCase ) new_imgs_list.append(_UpperCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __UpperCAmelCase ( _UpperCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __snake_case = ascii_lowercase + digits return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) ) if __name__ == "__main__": main() print('''DONE ✅''')
680
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : Any = get_logger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving model to {ckpt_dir}''' ) __snake_case = {"model": state_dict} dist_cp.save_state_dict( state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: 
__snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading model from {input_model_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __snake_case = ( os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) __snake_case = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , ) __snake_case = state_dict["model"] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]: os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: __snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]: accelerator.wait_for_everyone() with FSDP.state_dict_type( _UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __snake_case = None # below check should work but currently it isn't working (mostly a PyTorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) __snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) __snake_case = torch.load(_UpperCAmelCase ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: __snake_case = ( os.path.join(_UpperCAmelCase ,
F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) __snake_case = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , ) __snake_case = optim_state["optimizer"] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) __snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) optimizer.load_state_dict(_UpperCAmelCase )
680
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = None class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase ): __SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self : Optional[int] , a_ : float = 0.02 , a_ : float = 100 , a_ : float = 1.007 , a_ : float = 80 , a_ : float = 0.05 , a_ : float = 50 , ): """simple docstring""" __snake_case = sigma_max # setable values __snake_case = None __snake_case = None __snake_case = None # sigma(t_i) def A ( self : List[str] , a_ : torch.FloatTensor , a_ : Optional[int] = None ): """simple docstring""" return sample def A ( self : Dict , a_ : int , a_ : Union[str, torch.device] = None ): """simple docstring""" __snake_case = num_inference_steps __snake_case = np.arange(0 , self.num_inference_steps )[::-1].copy() __snake_case = torch.from_numpy(a_ ).to(a_ ) __snake_case = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __snake_case = torch.tensor(a_ , dtype=torch.floataa , device=a_ ) def A ( self : List[str] , a_ : torch.FloatTensor , a_ : float , a_ : Optional[torch.Generator] = None ): """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: __snake_case = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: __snake_case = 0 # sample eps ~ N(0, S_noise^2 * I) __snake_case = self.config.s_noise * randn_tensor(sample.shape , generator=a_ ).to(sample.device ) __snake_case = sigma + gamma * sigma __snake_case = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A ( self : Optional[int] , a_ : torch.FloatTensor , a_ : float , a_ : float , a_ : torch.FloatTensor , a_ : bool = True , ): """simple docstring""" __snake_case = sample_hat + sigma_hat * model_output __snake_case = (sample_hat - pred_original_sample) / sigma_hat __snake_case = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=a_ , derivative=a_ , pred_original_sample=a_ ) def A ( self : Optional[int] , a_ : torch.FloatTensor , a_ : float , a_ : float , a_ : torch.FloatTensor , a_ : torch.FloatTensor , a_ : torch.FloatTensor , a_ : bool = True , ): """simple docstring""" __snake_case = sample_prev + sigma_prev * model_output __snake_case = (sample_prev - pred_original_sample) / sigma_prev __snake_case = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=a_ , derivative=a_ , pred_original_sample=a_ ) def A ( self : List[Any] , a_ : int , a_ : Tuple , a_ : List[str] ): """simple docstring""" raise NotImplementedError()
680
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("iterations must be defined as integers" ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1: raise ValueError( "starting number must be an integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) __snake_case = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(_UpperCAmelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
680
1
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str: if number > 0: raise ValueError("input must be a negative integer" ) __snake_case = len(bin(_UpperCAmelCase )[3:] ) __snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:] __snake_case = ( ( "1" + "0" * (binary_number_length - len(_UpperCAmelCase )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
680
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str: if number > 0: raise ValueError("input must be a negative integer" ) __snake_case = len(bin(_UpperCAmelCase )[3:] ) __snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:] __snake_case = ( ( "1" + "0" * (binary_number_length - len(_UpperCAmelCase )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
680
1
'''simple docstring''' import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch a : Dict = '''sshleifer/bart-tiny-random''' a : str = '''patrickvonplaten/t5-tiny-random''' @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def A ( self : Union[str, Any] ): """simple docstring""" return AutoConfig.from_pretrained(a_ ) def A ( self : str ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) def A ( self : Dict ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def A ( self : Optional[int] ): """simple docstring""" __snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def A ( self : Dict ): """simple docstring""" with self.assertRaises(a_ ): create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
'''simple docstring''' from timeit import timeit def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int: if number < 0: raise ValueError("the value of input must not be negative" ) __snake_case = 0 while number: number &= number - 1 result += 1 return result def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int: if number < 0: raise ValueError("the value of input must not be negative" ) __snake_case = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def __UpperCAmelCase ( ) -> None: def do_benchmark(_UpperCAmelCase : int ) -> None: __snake_case = "import __main__ as z" print(F'''Benchmark when {number = }:''' ) print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' ) __snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase ) print(F'''timeit() runs in {timing} seconds''' ) print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' ) __snake_case = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , ) print(F'''timeit() runs in {timing} seconds''' ) for number in (25, 37, 58, 0): do_benchmark(_UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
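# Quick equivalence sketch for the two popcount helpers benchmarked above
# (hypothetical restored name): Kernighan's loop clears the lowest set bit once
# per iteration, while the modulo variant inspects every bit position.
def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count

for n in (25, 37, 58, 0):
    assert popcount_kernighan(n) == bin(n).count("1")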
'''simple docstring''' import argparse from collections import defaultdict import yaml a : Dict = '''docs/source/en/_toctree.yml''' def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Tuple: __snake_case = defaultdict(_UpperCAmelCase ) __snake_case = [] __snake_case = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"local": doc["local"], "title": doc["title"]} ) else: new_doc_list.append(_UpperCAmelCase ) __snake_case = new_doc_list __snake_case = [key for key, value in counts.items() if value > 1] __snake_case = [] for duplicate_key in duplicates: __snake_case = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} ) if len(_UpperCAmelCase ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add non-duplicate keys new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] ) __snake_case = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(_UpperCAmelCase ) > 1: raise ValueError(F'''{doc_list} has two 'overview' docs which is not allowed.''' ) overview_doc.extend(_UpperCAmelCase ) # Sort return overview_doc def __UpperCAmelCase ( _UpperCAmelCase : Any=False ) -> List[Any]: with open(_UpperCAmelCase , encoding="utf-8" ) as f: __snake_case = yaml.safe_load(f.read() ) # Get to the API doc __snake_case = 0 while content[api_idx]["title"] != "API": api_idx += 1 __snake_case = content[api_idx]["sections"] # Then to the model doc __snake_case = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __snake_case = api_doc[scheduler_idx]["sections"] __snake_case = clean_doc_toc(_UpperCAmelCase ) __snake_case = False if new_scheduler_doc != scheduler_doc: __snake_case = True if overwrite: __snake_case = new_scheduler_doc if diff: if overwrite: __snake_case = api_doc with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_UpperCAmelCase , allow_unicode=_UpperCAmelCase ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
) def __UpperCAmelCase ( _UpperCAmelCase : Any=False ) -> List[str]: with open(_UpperCAmelCase , encoding="utf-8" ) as f: __snake_case = yaml.safe_load(f.read() ) # Get to the API doc __snake_case = 0 while content[api_idx]["title"] != "API": api_idx += 1 __snake_case = content[api_idx]["sections"] # Then to the model doc __snake_case = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __snake_case = False __snake_case = api_doc[pipeline_idx]["sections"] __snake_case = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __snake_case = pipeline_doc["section"] __snake_case = clean_doc_toc(_UpperCAmelCase ) if overwrite: __snake_case = new_sub_pipeline_doc new_pipeline_docs.append(_UpperCAmelCase ) # sort overall pipeline doc __snake_case = clean_doc_toc(_UpperCAmelCase ) if new_pipeline_docs != pipeline_docs: __snake_case = True if overwrite: __snake_case = new_pipeline_docs if diff: if overwrite: __snake_case = api_doc with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_UpperCAmelCase , allow_unicode=_UpperCAmelCase ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : str = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') a : str = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
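# A hedged illustration (toy data, not from the repo) of the cleaning rule the
# helper above implements: duplicate `local` keys sharing one title collapse to
# a single entry, the remaining docs sort by title, and the lone "Overview" doc
# is pinned to the front of the returned list.
toy_doc_list = [
    {"local": "overview", "title": "Overview"},
    {"local": "beta", "title": "Beta"},
    {"local": "alpha", "title": "Alpha"},
    {"local": "alpha", "title": "Alpha"},  # duplicate key, same title -> deduped
]
expected_clean_toc = [
    {"local": "overview", "title": "Overview"},
    {"local": "alpha", "title": "Alpha"},
    {"local": "beta", "title": "Beta"},
]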
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging a : Dict = logging.get_logger(__name__) a : str = '''▁''' a : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''} a : Optional[Any] = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } a : List[str] = { '''facebook/mbart-large-50-one-to-many-mmt''': 1_024, } # fmt: off a : List[str] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self : Optional[int] , a_ : Any , a_ : int=None , a_ : List[str]=None , a_ : str="</s>" , a_ : Dict="</s>" , a_ : Optional[Any]="<s>" , a_ : Union[str, Any]="<unk>" , a_ : Tuple="<pad>" , a_ : str="<mask>" , a_ : Optional[Dict[str, Any]] = None , **a_ : Optional[int] , ): """simple docstring""" __snake_case = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token __snake_case = {} if sp_model_kwargs is None else sp_model_kwargs __snake_case = kwargs.get("additional_special_tokens" , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=a_ , tgt_lang=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , ) __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a_ ) ) __snake_case = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __snake_case = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __snake_case = 1 __snake_case = len(self.sp_model ) __snake_case = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a_ ) } __snake_case = {v: k for k, v in self.lang_code_to_id.items()} __snake_case = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __snake_case = src_lang if src_lang is not None else "en_XX" __snake_case = self.lang_code_to_id[self._src_lang] __snake_case = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A ( self : Dict ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def A ( self : Dict ): """simple docstring""" return self._src_lang @src_lang.setter def A ( self : List[Any] , a_ : str ): """simple docstring""" __snake_case = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Union[str, Any] ): """simple docstring""" __snake_case = self.__dict__.copy() __snake_case = None return state def __setstate__( self : Optional[int] , a_ : Dict ): """simple docstring""" __snake_case = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case = {} __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self : Any ): """simple docstring""" __snake_case = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A ( self : int , a_ : str ): """simple docstring""" return self.sp_model.encode(a_ , out_type=a_ ) def A ( self : List[str] , a_ : str ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __snake_case = self.sp_model.PieceToId(a_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A ( self : Dict , a_ : int ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A ( self : str , a_ : Any ): """simple docstring""" __snake_case = [] __snake_case = "" __snake_case = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a_ ) + token __snake_case = True __snake_case = [] else: current_sub_tokens.append(a_ ) __snake_case = False out_string += self.sp_model.decode(a_ ) return out_string.strip() def A ( self : Dict , a_ : str , a_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(a_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_ , "wb" ) as fi: __snake_case = 
self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,) def A ( self : Dict , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) __snake_case = [1] * len(self.prefix_tokens ) __snake_case = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(a_ )) + suffix_ones return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : Union[str, Any] , a_ : Tuple , a_ : str , a_ : Optional[str] , a_ : Optional[str] , **a_ : str ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __snake_case = src_lang __snake_case = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case = self.convert_tokens_to_ids(a_ ) __snake_case = tgt_lang_id return inputs def A ( self : Optional[Any] , a_ : List[str] , a_ : str = "en_XX" , a_ : Optional[List[str]] = None , a_ : str = "ro_RO" , **a_ : List[Any] , ): """simple docstring""" __snake_case = src_lang __snake_case = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def A ( self : Optional[Any] ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def A ( self : List[Any] ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : Optional[Any] , a_ : str ): """simple docstring""" __snake_case = self.lang_code_to_id[src_lang] __snake_case = [self.cur_lang_code_id] __snake_case = [self.eos_token_id] def A ( self : int , a_ : str ): """simple docstring""" __snake_case = self.lang_code_to_id[tgt_lang] __snake_case = [self.cur_lang_code_id] __snake_case = [self.eos_token_id]
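# A small sketch of the fairseq/SentencePiece id alignment described in the
# comment table of the tokenizer above: fairseq reserves ids 0-3 for
# <s>/<pad>/</s>/<unk>, so every ordinary SentencePiece piece id is shifted by
# a fixed offset of 1, and spm id 0 (its <unk>) maps to the fairseq <unk> id.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_id_to_fairseq_id(spm_id: int) -> int:
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_id_to_fairseq_id(0) == 3  # spm <unk> -> fairseq <unk>
assert spm_id_to_fairseq_id(5) == 6  # ordinary pieces are shifted by 1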
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors a : Any = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """sequence-classification""" def __init__( self : List[str] , a_ : str ): """simple docstring""" if type(a_ ) == dict: __snake_case = Namespace(**a_ ) __snake_case = glue_output_modes[hparams.task] __snake_case = glue_tasks_num_labels[hparams.task] super().__init__(a_ , a_ , self.mode ) def A ( self : Union[str, Any] , **a_ : List[Any] ): """simple docstring""" return self.model(**a_ ) def A ( self : int , a_ : Optional[Any] , a_ : int ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __snake_case = self(**a_ ) __snake_case = outputs[0] __snake_case = self.trainer.lr_schedulers[0]["scheduler"] __snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def A ( self : List[str] ): """simple docstring""" __snake_case = self.hparams __snake_case = processors[args.task]() __snake_case = processor.get_labels() for mode in ["train", "dev"]: __snake_case = self._feature_file(a_ ) if os.path.exists(a_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , a_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) __snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) __snake_case = convert_examples_to_features( a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , a_ ) torch.save(a_ , a_ ) def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ): """simple docstring""" __snake_case = "dev" if mode == "test" else mode __snake_case = self._feature_file(a_ ) logger.info("Loading features from cached file %s" , a_ ) __snake_case = torch.load(a_ ) __snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , ) def A ( self : int , a_ : List[str] , a_ : Tuple ): """simple docstring""" __snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", 
"albert"] else None __snake_case = self(**a_ ) __snake_case , __snake_case = outputs[:2] __snake_case = logits.detach().cpu().numpy() __snake_case = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A ( self : Dict , a_ : Optional[int] ): """simple docstring""" __snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() __snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __snake_case = np.argmax(a_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": __snake_case = np.squeeze(a_ ) __snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 ) __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = [[] for _ in range(out_label_ids.shape[0] )] __snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )} __snake_case = dict(results.items() ) __snake_case = results return ret, preds_list, out_label_list def A ( self : Tuple , a_ : list ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A ( self : int , a_ : Tuple ): """simple docstring""" __snake_case , __snake_case , __snake_case = self._eval_end(a_ ) __snake_case = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A ( a_ : str , a_ : Any ): """simple docstring""" BaseTransformer.add_model_specific_args(a_ , a_ ) parser.add_argument( "--max_seq_length" , default=128 , type=a_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def __UpperCAmelCase ( ) -> Union[str, Any]: __snake_case = argparse.ArgumentParser() add_generic_args(_UpperCAmelCase , os.getcwd() ) __snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) __snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __snake_case = os.path.join( "./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) __snake_case = GLUETransformer(_UpperCAmelCase ) __snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) ) __snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCAmelCase ) if __name__ == "__main__": main()
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger a : Any = '''<<<<<<< This should probably be modified because it mentions: ''' a : Dict = '''======= >>>>>>> ''' a : Tuple = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] a : Any = [ # (pattern, replacement) # Order is important here for some replacements (r'''tfds\.core''', r'''datasets'''), (r'''tf\.io\.gfile\.GFile''', r'''open'''), (r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''), (r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''), (r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''), (r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''), (r'''tfds\.features\.FeaturesDict\(''', r'''dict('''), (r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (r'''tfds\.''', r'''datasets.'''), (r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''), (r'''self\.builder_config''', r'''self.config'''), ] def __UpperCAmelCase ( _UpperCAmelCase : Namespace ) -> Dict: return ConvertCommand(args.tfds_path , args.datasets_directory ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): @staticmethod def A ( a_ : ArgumentParser ): """simple docstring""" __snake_case = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=a_ , required=a_ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=a_ , required=a_ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=a_ ) def __init__( self : str , a_ : str , a_ : str , *a_ : Dict ): """simple docstring""" __snake_case = get_logger("datasets-cli/converting" ) __snake_case = tfds_path __snake_case = datasets_directory def A ( self : Optional[Any] ): """simple docstring""" if os.path.isdir(self._tfds_path ): __snake_case = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __snake_case = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) __snake_case = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) __snake_case = [] __snake_case = [] __snake_case = {} if os.path.isdir(self._tfds_path ): __snake_case = os.listdir(a_ ) else: __snake_case = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) __snake_case = os.path.join(a_ , a_ ) __snake_case = os.path.join(a_ , a_ ) if not os.path.isfile(a_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(a_ , encoding="utf-8" ) as f: __snake_case = f.readlines() __snake_case = [] __snake_case = False __snake_case = False __snake_case = [] for line in lines: __snake_case = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __snake_case = "import datasets\n" elif "import tensorflow" in out_line: # order is important here __snake_case = "" continue elif "from absl import logging" in out_line: __snake_case = "from datasets import logging\n" elif "getLogger" in out_line: __snake_case = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __snake_case = True __snake_case = list(filter(lambda a_ : e in out_line , a_ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a_ ) + "\n" ) out_lines.append(a_ ) out_lines.append(a_ ) continue else: for pattern, replacement in TO_CONVERT: __snake_case = re.sub(a_ , a_ , a_ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __snake_case = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , a_ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) __snake_case = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __snake_case = True out_lines.append(a_ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __snake_case = f_name.replace(".py" , "" ) __snake_case = os.path.join(a_ , a_ ) __snake_case = os.path.join(a_ , a_ ) os.makedirs(a_ , exist_ok=a_ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(a_ ) if needs_manual_update: with_manual_update.append(a_ ) with open(a_ , "w" , encoding="utf-8" ) as f: f.writelines(a_ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: __snake_case = os.path.basename(a_ ) __snake_case = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(a_ , a_ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
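# A standalone spot check (stdlib only) of one TO_CONVERT rule listed above:
# the generic tf dtype pattern rewrites to a datasets.Value(...) declaration.
import re

pattern, replacement = r"tf\.([\w\d]+)", r"datasets.Value('\1')"
assert re.sub(pattern, replacement, "tf.int64") == "datasets.Value('int64')"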
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] ) @pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase ) __snake_case = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: __snake_case = dataset_size < in_memory_max_size else: __snake_case = False __snake_case = is_small_dataset(_UpperCAmelCase ) assert result == expected
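# A plain restatement of the property the parametrized test above asserts,
# using a hypothetical helper name: a dataset only counts as "small" when both
# the dataset size and the in-memory cap are truthy and the size is under the cap.
def is_small(dataset_size, in_memory_max_size) -> bool:
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)

assert is_small(400 * 2**20, 900 * 2**20) is True
assert is_small(600 * 2**20, 0) is False      # a cap of 0 disables the check
assert is_small(None, 900 * 2**20) is False   # an unknown size is never "small"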
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Dict: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : List[Any] , a_ : nn.Module , a_ : int ): """simple docstring""" super().__init__() __snake_case = module __snake_case = nn.Sequential( nn.Linear(module.in_features , a_ , bias=a_ ) , nn.Linear(a_ , module.out_features , bias=a_ ) , ) __snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=a_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def A ( self : Optional[int] , a_ : Optional[Any] , *a_ : Union[str, Any] , **a_ : List[Any] ): """simple docstring""" return self.module(a_ , *a_ , **a_ ) + self.adapter(a_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module __SCREAMING_SNAKE_CASE = """bigscience/bloom-1b7""" # Constant values __SCREAMING_SNAKE_CASE = 2.109_6595_5269_2574 __SCREAMING_SNAKE_CASE = """Hello my name is""" __SCREAMING_SNAKE_CASE = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) __SCREAMING_SNAKE_CASE = 10 def A ( self : Optional[Any] ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained(self.model_name ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : str ): """simple docstring""" super().setUp() # Models and tokenizer __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" ) def A ( self : str ): """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def A ( self : Dict ): """simple docstring""" __snake_case = self.model_abit.config self.assertTrue(hasattr(a_ , "quantization_config" ) ) __snake_case = config.to_dict() __snake_case = config.to_diff_dict() __snake_case = config.to_json_string() def A ( self : Tuple ): """simple docstring""" from bitsandbytes.nn import Paramsabit __snake_case = self.model_fpaa.get_memory_footprint() __snake_case = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __snake_case = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def A ( self : Optional[int] ): """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(a_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ) __snake_case = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS ) def A ( self : Tuple ): """simple docstring""" __snake_case = BitsAndBytesConfig() __snake_case = True __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=a_ , device_map="auto" ) __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ) __snake_case = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS ) def A ( self : str ): """simple docstring""" with self.assertRaises(a_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = BitsAndBytesConfig() with self.assertRaises(a_ ): __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=a_ , load_in_abit=a_ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def A ( self : Tuple ): """simple docstring""" with self.assertRaises(a_ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(a_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(a_ ): # Tries with a `device` self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(a_ ): # Tries with a `device` self.model_abit.float() 
with self.assertRaises(a_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ) __snake_case = self.model_fpaa.to(torch.floataa ) __snake_case = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __snake_case = self.model_fpaa.to("cpu" ) # Check this does not throw an error __snake_case = self.model_fpaa.half() # Check this does not throw an error __snake_case = self.model_fpaa.float() def A ( self : str ): """simple docstring""" __snake_case = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=a_ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @classmethod def A ( cls : Union[str, Any] ): """simple docstring""" __snake_case = "t5-small" __snake_case = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense __snake_case = AutoTokenizer.from_pretrained(cls.model_name ) __snake_case = "Translate in German: Hello, my dog is cute" def A ( self : Optional[Any] ): """simple docstring""" gc.collect() torch.cuda.empty_cache() def A ( self : Optional[Any] ): """simple docstring""" from transformers import TaForConditionalGeneration __snake_case = TaForConditionalGeneration._keep_in_fpaa_modules __snake_case = None # test with `t5-small` __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" ) __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) __snake_case = model.generate(**a_ ) # test with `flan-t5-small` __snake_case = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=a_ , device_map="auto" ) __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) __snake_case = model.generate(**a_ ) __snake_case = modules def A ( self : str ): """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) __snake_case = model.generate(**a_ ) # test with `flan-t5-small` __snake_case = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=a_ , device_map="auto" ) __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) __snake_case = model.generate(**a_ ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : str ): """simple docstring""" super().setUp() # model_name __snake_case = "bigscience/bloom-560m" __snake_case = "t5-small" # Different types of model __snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" ) # Sequence classification model __snake_case = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=a_ , device_map="auto" ) # CausalLM model __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" ) # Seq2seq model __snake_case = AutoModelForSeqaSeqLM.from_pretrained( 
self.seq_to_seq_name , load_in_abit=a_ , device_map="auto" ) def A ( self : List[str] ): """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def A ( self : int ): """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : str ): """simple docstring""" super().setUp() def A ( self : Dict ): """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def A ( self : str ): """simple docstring""" __snake_case = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __snake_case = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Union[str, Any] ): """simple docstring""" super().setUp() def A ( self : Dict ): """simple docstring""" __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=a_ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch __snake_case = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Optional[Any] ): """simple docstring""" __snake_case = "facebook/opt-350m" super().setUp() def A ( self : str ): """simple docstring""" if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __snake_case = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __snake_case = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(a_ ) ): __snake_case = LoRALayer(module.q_proj , rank=16 ) __snake_case = LoRALayer(module.k_proj , rank=16 ) __snake_case = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __snake_case = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __snake_case = model.forward(**a_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(a_ , a_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(a_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """gpt2-xl""" __SCREAMING_SNAKE_CASE = 3.3191_8548_5415_2187
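# A self-contained sketch of the LoRA adapter pattern exercised in the test
# class above: a frozen linear layer plus a trainable low-rank bottleneck added
# to its output. With the second adapter weight zero-initialised, the wrapped
# layer starts out exactly equal to the original one.
import torch
import torch.nn as nn

class LoRASketch(nn.Module):
    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        nn.init.zeros_(self.adapter[1].weight)  # adapter starts as a no-op

    def forward(self, x):
        return self.module(x) + self.adapter(x)

wrapped = LoRASketch(nn.Linear(8, 8), rank=2)
x = torch.randn(1, 8)
assert torch.allclose(wrapped(x), wrapped.module(x))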
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float: if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Length must be a positive number." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float: if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Length must be a positive number." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
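# Numeric sanity check for the two formulas above: for edge = 1, a regular
# dodecahedron has surface area 3*sqrt(25 + 10*sqrt(5)) ~ 20.6457 and volume
# (15 + 7*sqrt(5))/4 ~ 7.6631.
surface_area = 3 * ((25 + 10 * 5 ** 0.5) ** 0.5) * 1 ** 2
volume = ((15 + 7 * 5 ** 0.5) / 4) * 1 ** 3
assert abs(surface_area - 20.6457) < 1e-3
assert abs(volume - 7.6631) < 1e-3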
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a : Tuple = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ): """simple docstring""" __snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] ) __snake_case = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , a_ : int ): """simple docstring""" super().__init__(a_ ) __snake_case = BertEncoderWithPabee(a_ ) self.init_weights() __snake_case = 0 __snake_case = 0 __snake_case = 0 __snake_case = 0 def A ( self : Optional[int] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = threshold def A ( self : Optional[Any] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = patience def A ( self : Any ): """simple docstring""" __snake_case = 0 __snake_case = 0 def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = self.inference_layers_num / self.inference_instances_num __snake_case = ( f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(a_ ) @add_start_docstrings_to_model_forward(a_ ) def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ): """simple docstring""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __snake_case = input_ids.size() elif inputs_embeds is not None: __snake_case = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) if token_type_ids is None: __snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __snake_case , __snake_case , __snake_case = encoder_hidden_states.size() __snake_case = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __snake_case = torch.ones(a_ , device=a_ ) __snake_case = self.invert_attention_mask(a_ ) else: __snake_case = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers ) __snake_case = self.embeddings( input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ ) __snake_case = embedding_output if self.training: __snake_case = [] for i in range(self.config.num_hidden_layers ): __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](output_dropout(a_ ) ) res.append(a_ ) elif self.patience == 0: # Use all layers for inference __snake_case = self.encoder( a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case = self.pooler(encoder_outputs[0] ) __snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )] else: __snake_case = 0 __snake_case = None __snake_case = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __snake_case = self.encoder.adaptive_forward( a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ ) __snake_case = self.pooler(a_ ) __snake_case = output_layers[i](a_ ) if regression: __snake_case = logits.detach() if patient_result is not None: __snake_case = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __snake_case = 0 else: __snake_case = logits.detach().argmax(dim=1 ) if patient_result is not None: __snake_case = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(a_ ) ): patient_counter += 1 else: __snake_case = 0 __snake_case = logits if patient_counter == self.patience: break __snake_case = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" , _UpperCamelCase , ) class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : List[str] , a_ : Tuple ): """simple docstring""" super().__init__(a_ ) __snake_case = config.num_labels __snake_case = BertModelWithPabee(a_ ) __snake_case = nn.Dropout(config.hidden_dropout_prob ) __snake_case = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(a_ ) def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ): """simple docstring""" __snake_case = self.bert( input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __snake_case = (logits[-1],) if labels is not None: __snake_case = None __snake_case = 0 for ix, logits_item in enumerate(a_ ): if self.num_labels == 1: # We are doing regression __snake_case = MSELoss() __snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __snake_case = CrossEntropyLoss() __snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __snake_case = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __snake_case = (total_loss / total_weights,) + outputs return outputs
'''simple docstring''' from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a : Any = 6_378_137.0 a : List[Any] = 6_356_752.314_245 a : Dict = 6_378_137 def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: __snake_case = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) __snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values __snake_case = (b_lata + b_lata) / 2 __snake_case = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2) __snake_case = cos(sigma / 2 ) ** 2 __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2) __snake_case = sin(sigma / 2 ) ** 2 __snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
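# Sanity sketch for the ellipsoid constants used above: the flattening
# f = (AXIS_A - AXIS_B) / AXIS_A of the WGS-84 ellipsoid should come out near
# the canonical value 1/298.257.
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
flattening = (AXIS_A - AXIS_B) / AXIS_A
assert abs(1 / flattening - 298.257) < 0.01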
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType a : List[Any] = logging.get_logger(__name__) a : Optional[int] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """layoutlmv3""" def __init__( self : List[Any] , a_ : Optional[int]=50_265 , a_ : Union[str, Any]=768 , a_ : List[str]=12 , a_ : Optional[int]=12 , a_ : Any=3_072 , a_ : Tuple="gelu" , a_ : Tuple=0.1 , a_ : str=0.1 , a_ : Union[str, Any]=512 , a_ : Optional[int]=2 , a_ : str=0.02 , a_ : List[Any]=1e-5 , a_ : str=1 , a_ : str=0 , a_ : Tuple=2 , a_ : Dict=1_024 , a_ : Any=128 , a_ : Optional[Any]=128 , a_ : Optional[int]=True , a_ : List[Any]=32 , a_ : List[Any]=128 , a_ : Dict=64 , a_ : Tuple=256 , a_ : List[str]=True , a_ : List[str]=True , a_ : Optional[Any]=True , a_ : Dict=224 , a_ : Dict=3 , a_ : List[Any]=16 , a_ : Any=None , **a_ : Optional[Any] , ): """simple docstring""" super().__init__( vocab_size=a_ , hidden_size=a_ , num_hidden_layers=a_ , num_attention_heads=a_ , intermediate_size=a_ , hidden_act=a_ , hidden_dropout_prob=a_ , attention_probs_dropout_prob=a_ , max_position_embeddings=a_ , type_vocab_size=a_ , initializer_range=a_ , layer_norm_eps=a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , ) __snake_case = max_ad_position_embeddings __snake_case = coordinate_size __snake_case = shape_size __snake_case = has_relative_attention_bias __snake_case = rel_pos_bins __snake_case = max_rel_pos __snake_case = has_spatial_attention_bias __snake_case = rel_ad_pos_bins __snake_case = max_rel_ad_pos __snake_case = text_embed __snake_case = visual_embed __snake_case = input_size __snake_case = num_channels __snake_case = patch_size __snake_case = classifier_dropout class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = version.parse("""1.12""" ) @property def A ( self : List[Any] ): """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) @property def A ( self : int ): """simple docstring""" return 1e-5 @property def A ( self : Tuple ): """simple docstring""" return 12 def A ( self : List[str] , a_ : "ProcessorMixin" , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional["TensorType"] = None , a_ : int = 3 , a_ : int = 40 , a_ : int = 40 , ): """simple docstring""" setattr(processor.image_processor , "apply_ocr" , a_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __snake_case = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 
tokens to avoid optimizations made by ONNX __snake_case = processor.tokenizer.num_special_tokens_to_add(a_ ) __snake_case = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ ) # Generate dummy inputs according to compute batch and sequence __snake_case = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __snake_case = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __snake_case = self._generate_dummy_images(a_ , a_ , a_ , a_ ) __snake_case = dict( processor( a_ , text=a_ , boxes=a_ , return_tensors=a_ , ) ) return inputs
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray: # For applying gaussian function for each element in matrix. __snake_case = math.sqrt(_UpperCAmelCase ) __snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray: __snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase ): __snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray: __snake_case = np.zeros(img.shape ) __snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase ) __snake_case , __snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] __snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase ) __snake_case = val return imga def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple: __snake_case = args[1] if args[1:] else "../image_data/lena.jpg" __snake_case = float(args[2] ) if args[2:] else 1.0 __snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: __snake_case = int(args[4] ) __snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: __snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a , a , a , a : Tuple = parse_args(sys.argv) a : Tuple = cva.imread(filename, 0) cva.imshow('''input image''', img) a : Dict = img / 255 a : str = out.astype('''float32''') a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a : Dict = out * 255 a : List[str] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
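# A minimal check mirroring the spatial Gaussian kernel construction above
# (hypothetical restored name `gauss_kernel`): the kernel peaks at its centre
# and is symmetric.
import math
import numpy as np

def gauss_kernel(kernel_size: int, sigma: float) -> np.ndarray:
    dist = np.zeros((kernel_size, kernel_size))
    for i in range(kernel_size):
        for j in range(kernel_size):
            dist[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((dist / sigma) ** 2) * 0.5)

kernel = gauss_kernel(5, 1.0)
assert kernel[2, 2] == kernel.max()
assert np.allclose(kernel, kernel.T)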
680
1
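A minimal way to exercise bilateral_filter above without an image file or a GUI, using a synthetic noisy gradient; the shape and variance values here are illustrative assumptions rather than part of the original script.

import numpy as np

rng = np.random.default_rng(0)
img = np.tile(np.linspace(0.0, 1.0, 64, dtype="float32"), (64, 1))
img = img + rng.normal(0.0, 0.05, img.shape)  # add mild gaussian noise

smoothed = bilateral_filter(img, 1.0, 1.0, 5)
print(smoothed.shape)  # (64, 64); a kernel_size // 2 border is left at zero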
'''simple docstring'''
from collections import defaultdict
from pathlib import Path

import pandas as pd

from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


PRED = [
    '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
    ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
    ''' depression" German airline confirms it knew of Andreas Lubitz's depression years before he took control.''',
    '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
    ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's'''
    ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the'''
    ''' body.''',
    '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
    ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
    ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
    ''' punishment.''',
]

TGT = [
    '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
    ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
    ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
    '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
    ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
    ''' Israelis .''',
    '''Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
    ''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
    ''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
    ''' sentences up by 28% .''',
]


def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n>'
        ' "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
        ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says .'
        " Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
680
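A rough sketch of why rougeLsum is sensitive to newline separation, assuming the same calculate_rouge helper imported above; the sentences are invented. With newline_sep=True the helper re-splits each summary into "\n"-joined sentences before scoring, which is what lets rougeLsum credit sentence-level overlap.

pred = ["the cat sat on the mat . it was a sunny day ."]
tgt = ["the cat sat on the mat . the day was sunny ."]

with_sep = calculate_rouge(pred, tgt, newline_sep=True, rouge_keys=["rougeLsum"])
without_sep = calculate_rouge(pred, tgt, newline_sep=False, rouge_keys=["rougeLsum"])
print(with_sep, without_sep)  # with_sep is expected to be >= without_sep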
'''simple docstring'''


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort by the supplied key (e.g. value) and take items while they fit the budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
1
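A short, made-up run of the helpers above; the menu names, values, and weights are invented for illustration.

names = ["Burger", "Pizza", "Coca Cola", "Rice"]
values = [80, 100, 60, 70]
weights = [40, 60, 40, 70]

foods = build_menu(names, values, weights)
# Take the highest-value items first while the weight budget (100) allows.
chosen, total_value = greedy(foods, 100, Things.get_value)
print(chosen, total_value)  # [Things(Pizza, 100, 60), Things(Burger, 80, 40)] 180.0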
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
680
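Not accelerate code, just a self-contained sketch of the same registration pattern with a made-up demo subcommand, showing how each *_command_parser helper is expected to attach a subparser and bind a callable through set_defaults(func=...).

from argparse import ArgumentParser


def demo_command_parser(subparsers=None):
    parser = subparsers.add_parser("demo")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello, {args.name}"))
    return parser


cli = ArgumentParser("demo CLI", usage="demo <command> [<args>]")
demo_command_parser(subparsers=cli.add_subparsers(help="demo command helpers"))
args = cli.parse_args(["demo", "--name", "accelerate"])
args.func(args)  # prints "hello, accelerate"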
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
680
1
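A quick illustration of the logarithm trick used in solution: since base**exp has floor(exp * log10(base)) + 1 digits, comparing exp * log10(base) ranks the powers without ever materializing them. The pairs below are illustrative, not read from base_exp.txt.

from math import log10

rows = [(632382, 518061), (519432, 525806)]  # hypothetical (base, exponent) pairs
best = max(range(len(rows)), key=lambda i: rows[i][1] * log10(rows[i][0]))
print(best + 1)  # 1-based index of the largest base**exponent -> 1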
'''simple docstring'''
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
680
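A tiny sketch of the format_dict helper in isolation; the dictionary contents are placeholders.

info = {"Platform": "<fill in>", "Python version": "<fill in>"}
# Each entry becomes a "- key: value" line, matching the issue-template output.
print(EnvironmentCommand.format_dict(info))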
'''simple docstring'''
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
680
1
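A hedged sketch of what build_inputs_with_special_tokens produces, mirrored over plain integers so it runs without vocab or merges files; the token ids are made up.

def sketch_build_inputs(bos_id, eos_id, ids_0, ids_1=None):
    # Mirrors BlenderbotSmallTokenizerFast.build_inputs_with_special_tokens.
    output = [bos_id] + ids_0 + [eos_id]
    if ids_1 is None:
        return output
    return output + [eos_id] + ids_1 + [eos_id]


print(sketch_build_inputs(1, 2, [10, 11]))        # [1, 10, 11, 2]
print(sketch_build_inputs(1, 2, [10, 11], [12]))  # [1, 10, 11, 2, 2, 12, 2]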