Dataset schema:

| Column | Type | Observed range |
| --- | --- | --- |
| code | string | lengths 82–54.1k |
| code_codestyle | int64 | 0–699 |
| style_context | string | lengths 111–35.6k |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
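Each row below pairs a `code` sample with a `style_context` sample, together with their `codestyle` ids and a 0/1 `label`. A minimal sketch of loading and inspecting rows with this schema via the `datasets` library; the dataset path is a hypothetical placeholder, not the dataset's real identifier:

```python
# Minimal sketch, assuming these rows are published as a Hugging Face dataset.
from datasets import load_dataset

# "user/code-style-pairs" is a placeholder path, not the real dataset id.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # preview the first 200 characters of the code field
```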
Row 1

code:

```python
'''simple docstring'''
from collections import Counter
from timeit import timeit


def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = "" , ):
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2


def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = "" ):
    """simple docstring"""
    if len(_UpperCamelCase ) == 0:
        return True
    lowercase_ : Any = input_str.replace(" " , "" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    lowercase_ : Dict = {}
    for character in lower_case_input_str:
        lowercase_ : Tuple = character_freq_dict.get(_UpperCamelCase , 0 ) + 1
    lowercase_ : str = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = "" ):
    """simple docstring"""
    print("\nFor string = " , _UpperCamelCase , ":" )
    print(
        "> can_string_be_rearranged_as_palindrome_counter()" ,
        "\tans =" ,
        can_string_be_rearranged_as_palindrome_counter(_UpperCamelCase ) ,
        "\ttime =" ,
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" ,
            setup="import __main__ as z" ,
        ) ,
        "seconds" ,
    )
    print(
        "> can_string_be_rearranged_as_palindrome()" ,
        "\tans =" ,
        can_string_be_rearranged_as_palindrome(_UpperCamelCase ) ,
        "\ttime =" ,
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)" ,
            setup="import __main__ as z" ,
        ) ,
        "seconds" ,
    )


if __name__ == "__main__":
    UpperCamelCase__ = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    UpperCamelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
```
code_codestyle: 620
style_context:

```python
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class _UpperCAmelCase ( lowerCAmelCase_ ):
    a : Union[str, Any] =["""image_processor"""]
    a : Dict ="""SamImageProcessor"""

    def __init__( self,__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        super().__init__(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = self.image_processor
        __lowerCAmelCase = -10
        __lowerCAmelCase = self.image_processor.size["""longest_edge"""]

    def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
        '''simple docstring'''
        __lowerCAmelCase = self.image_processor(
            __SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
        # pop arguments that are not used in the foward but used nevertheless
        __lowerCAmelCase = encoding_image_processor["""original_sizes"""]
        if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):  # Checks if Torch or TF tensor
            __lowerCAmelCase = original_sizes.numpy()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
            input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
        __lowerCAmelCase = self._normalize_and_convert(
            __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
        return encoding_image_processor

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
        '''simple docstring'''
        if input_points is not None:
            if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
                __lowerCAmelCase = [
                    self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
                ]
            else:
                __lowerCAmelCase = [
                    self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
                    for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    __lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
            __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )

        if input_labels is not None:
            __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )

        if input_boxes is not None:
            if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
                __lowerCAmelCase = [
                    self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
                    for box in input_boxes
                ]
            else:
                __lowerCAmelCase = [
                    self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
                    for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
                ]
            __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )

        if input_boxes is not None:
            if return_tensors == "pt":
                __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
                # boxes batch size of 1 by default
                __lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
                # boxes batch size of 1 by default
                __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )

        return encoding_image_processor

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        __lowerCAmelCase = max([point.shape[0] for point in input_points] )
        __lowerCAmelCase = []
        for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
            if point.shape[0] != expected_nb_points:
                __lowerCAmelCase = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
                __lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
            processed_input_points.append(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = processed_input_points
        return input_points, input_labels

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
        '''simple docstring'''
        __lowerCAmelCase , __lowerCAmelCase = original_size
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
        if is_bounding_box:
            __lowerCAmelCase = coords.reshape(-1,2,2 )
        __lowerCAmelCase = coords[..., 0] * (new_w / old_w)
        __lowerCAmelCase = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            __lowerCAmelCase = coords.reshape(-1,4 )
        return coords

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):  # Checks for TF or Torch tensor
                __lowerCAmelCase = input_points.numpy().tolist()
            if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
        else:
            __lowerCAmelCase = None
        if input_labels is not None:
            if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
                __lowerCAmelCase = input_labels.numpy().tolist()
            if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
                raise ValueError("""Input labels must be a list of list integers.""" )
            __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
        else:
            __lowerCAmelCase = None
        if input_boxes is not None:
            if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
                __lowerCAmelCase = input_boxes.numpy().tolist()
            if (
                not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
                or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
                or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
        else:
            __lowerCAmelCase = None
        return input_points, input_labels, input_boxes

    @property
    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        __lowerCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )

    def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
```
style_context_codestyle: 689
label: 0
Row 2

code:

```python
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


lowerCAmelCase_ : Any = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    __magic_name__ : str = ["""pixel_values"""]

    def __init__( self : int , lowercase__ : List[Any] = True , lowercase__ : List[str] = None , lowercase__ : Union[str, Any] = PILImageResampling.BICUBIC , lowercase__ : Any = True , lowercase__ : Optional[int] = None , lowercase__ : Any = True , lowercase__ : Any = 1 / 255 , lowercase__ : Union[str, Any] = True , lowercase__ : Union[str, Any] = None , lowercase__ : Union[str, Any] = None , lowercase__ : List[Any] = True , **lowercase__ : Union[str, Any] , ):
        '''simple docstring'''
        super().__init__(**__SCREAMING_SNAKE_CASE )
        a_ : Dict = size if size is not None else {"""shortest_edge""": 224}
        a_ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
        a_ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        a_ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
        a_ : int = do_resize
        a_ : List[str] = size
        a_ : List[str] = resample
        a_ : Tuple = do_center_crop
        a_ : str = crop_size
        a_ : Union[str, Any] = do_rescale
        a_ : Union[str, Any] = rescale_factor
        a_ : Union[str, Any] = do_normalize
        a_ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        a_ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
        a_ : List[Any] = do_convert_rgb

    def lowercase_ ( self : List[str] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : int = PILImageResampling.BICUBIC , lowercase__ : int = None , **lowercase__ : List[Any] , ):
        '''simple docstring'''
        a_ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        a_ : Any = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size["""shortest_edge"""] , default_to_square=__SCREAMING_SNAKE_CASE )
        return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowercase_ ( self : int , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : str = None , **lowercase__ : Optional[int] , ):
        '''simple docstring'''
        a_ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(__SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowercase_ ( self : Any , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : List[Any] = None , **lowercase__ : Union[str, Any] , ):
        '''simple docstring'''
        return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowercase_ ( self : Dict , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Dict = None , **lowercase__ : Optional[int] , ):
        '''simple docstring'''
        return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowercase_ ( self : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Dict = None , lowercase__ : Union[str, Any] = None , lowercase__ : str = None , lowercase__ : Tuple = None , lowercase__ : List[str] = None , lowercase__ : Tuple = None , lowercase__ : Optional[int] = None , lowercase__ : Optional[Any] = None , lowercase__ : Union[str, Any] = None , lowercase__ : Optional[Any] = None , lowercase__ : Any = None , lowercase__ : str = None , lowercase__ : Any = ChannelDimension.FIRST , **lowercase__ : List[Any] , ):
        '''simple docstring'''
        a_ : Tuple = do_resize if do_resize is not None else self.do_resize
        a_ : List[str] = size if size is not None else self.size
        a_ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE , param_name="""size""" , default_to_square=__SCREAMING_SNAKE_CASE )
        a_ : Any = resample if resample is not None else self.resample
        a_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
        a_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
        a_ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , param_name="""crop_size""" , default_to_square=__SCREAMING_SNAKE_CASE )
        a_ : int = do_rescale if do_rescale is not None else self.do_rescale
        a_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        a_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
        a_ : int = image_mean if image_mean is not None else self.image_mean
        a_ : Optional[int] = image_std if image_std is not None else self.image_std
        a_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        a_ : Tuple = make_list_of_images(__SCREAMING_SNAKE_CASE )
        if not valid_images(__SCREAMING_SNAKE_CASE ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            a_ : Any = [convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images]
        # All transformations expect numpy arrays.
        a_ : Dict = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
        if do_resize:
            a_ : List[str] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
        if do_center_crop:
            a_ : str = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
        if do_rescale:
            a_ : Any = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
        if do_normalize:
            a_ : int = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
        a_ : List[str] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
        a_ : str = {"""pixel_values""": images}
        return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
```
code_codestyle: 442
style_context:

```python
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")

_a : int = logging.getLogger(__name__)


@dataclass
class _UpperCAmelCase :
    a : Optional[str] =field(
        default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    a : Optional[str] =field(
        default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
    a : int =field(
        default=10_24 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    a : bool =field(
        default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    a : bool =field(
        default=lowerCAmelCase_ , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    a : Optional[int] =field(
        default=lowerCAmelCase_ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    a : Optional[int] =field(
        default=lowerCAmelCase_ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    a : Optional[int] =field(
        default=lowerCAmelCase_ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
    a : Optional[str] =field(
        default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
    a : Optional[str] =field(
        default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
    a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )

    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
        else:
            __lowerCAmelCase = self.train_file.split(""".""" )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            __lowerCAmelCase = self.validation_file.split(""".""" )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class _UpperCAmelCase :
    a : str =field(
        default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    a : Optional[str] =field(
        default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    a : Optional[str] =field(
        default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    a : Optional[str] =field(
        default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    a : bool =field(
        default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    a : str =field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    a : bool =field(
        default=lowerCAmelCase_ , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )


def _lowerCAmelCase ( ) -> Optional[Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    __lowerCAmelCase = training_args.get_process_log_level()
    logger.setLevel(lowercase )
    datasets.utils.logging.set_verbosity(lowercase )
    transformers.utils.logging.set_verbosity(lowercase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    __lowerCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        __lowerCAmelCase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        __lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                __lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
                __lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                __lowerCAmelCase = data_args.test_file
            else:
                raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )

        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}' )

        if data_args.train_file.endswith(""".csv""" ):
            # Loading a dataset from local csv files
            __lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            __lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    __lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
    __lowerCAmelCase = len(lowercase )

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __lowerCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    __lowerCAmelCase = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
    __lowerCAmelCase = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # Padding strategy
    if data_args.pad_to_max_length:
        __lowerCAmelCase = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        __lowerCAmelCase = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    __lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
    __lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )

    def preprocess_tabfact_function(lowercase ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(lowercase ):
            __lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
            __lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        __lowerCAmelCase = examples["""statement"""]
        __lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
        __lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
        __lowerCAmelCase = examples["""label"""]
        return result

    with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        __lowerCAmelCase = raw_datasets.map(
            lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        __lowerCAmelCase = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            __lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        __lowerCAmelCase = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            __lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("""--do_predict requires a test dataset""" )
        __lowerCAmelCase = raw_datasets["""test"""]
        if data_args.max_predict_samples is not None:
            __lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(lowercase ) ) , 3 ):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowercase ):
        __lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
        __lowerCAmelCase = np.argmax(lowercase , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        __lowerCAmelCase = default_data_collator
    elif training_args.fpaa:
        __lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
    else:
        __lowerCAmelCase = None

    # Initialize our Trainer
    __lowerCAmelCase = Trainer(
        model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )

    # Training
    if training_args.do_train:
        __lowerCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            __lowerCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __lowerCAmelCase = last_checkpoint
        __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
        __lowerCAmelCase = train_result.metrics
        __lowerCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
        )
        __lowerCAmelCase = min(lowercase , len(lowercase ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""" , lowercase )
        trainer.save_metrics("""train""" , lowercase )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        __lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
        __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
        __lowerCAmelCase = min(lowercase , len(lowercase ) )
        trainer.log_metrics("""eval""" , lowercase )
        trainer.save_metrics("""eval""" , lowercase )

    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        __lowerCAmelCase = predict_dataset.remove_columns("""label""" )
        __lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
        __lowerCAmelCase = np.argmax(lowercase , axis=1 )
        __lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
        if trainer.is_world_process_zero():
            with open(lowercase , """w""" ) as writer:
                logger.info("""***** Predict Results *****""" )
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(lowercase ):
                    __lowerCAmelCase = label_list[item]
                    writer.write(f'{index}\t{item}\n' )

    __lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowercase )
    else:
        trainer.create_model_card(**lowercase )


def _lowerCAmelCase ( lowercase ) -> str:
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
```
style_context_codestyle: 689
label: 0
Row 3

code:

```python
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
        '''simple docstring'''
        snake_case : List[str] = tempfile.mkdtemp()
        # fmt: off
        snake_case : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

        snake_case : Tuple = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        snake_case : Union[str, Any] = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE (self : int , **snake_case__ : str ) -> Any:
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE (self : Tuple , **snake_case__ : Any ) -> List[Any]:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        snake_case : Tuple = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        snake_case : Dict = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case : Optional[int] = self.get_tokenizer()
        snake_case : Optional[Any] = self.get_image_processor()
        snake_case : List[str] = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        processor.save_pretrained(self.tmpdirname )
        snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Tuple:
        '''simple docstring'''
        snake_case : List[Any] = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        snake_case : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        snake_case : str = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
        snake_case : int = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
        '''simple docstring'''
        snake_case : Optional[int] = self.get_image_processor()
        snake_case : Optional[Any] = self.get_tokenizer()
        snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        snake_case : int = self.prepare_image_inputs()
        snake_case : Union[str, Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="np" )
        snake_case : str = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
        '''simple docstring'''
        snake_case : List[Any] = self.get_image_processor()
        snake_case : Union[str, Any] = self.get_tokenizer()
        snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        snake_case : Union[str, Any] = "lower newer"
        snake_case : List[str] = processor(text=__SCREAMING_SNAKE_CASE )
        snake_case : Dict = tokenizer(__SCREAMING_SNAKE_CASE )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case : Optional[Any] = self.get_image_processor()
        snake_case : str = self.get_tokenizer()
        snake_case : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        snake_case : str = "lower newer"
        snake_case : str = self.prepare_image_inputs()
        snake_case : Dict = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )

        # test if it raises when no input is passed
        with self.assertRaises(__SCREAMING_SNAKE_CASE ):
            processor()

    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        snake_case : Union[str, Any] = self.get_image_processor()
        snake_case : Dict = self.get_tokenizer()
        snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        snake_case : Dict = processor.batch_decode(__SCREAMING_SNAKE_CASE )
        snake_case : Dict = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
        '''simple docstring'''
        snake_case : int = self.get_image_processor()
        snake_case : Tuple = self.get_tokenizer()
        snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
        snake_case : List[Any] = "lower newer"
        snake_case : Tuple = self.prepare_image_inputs()
        snake_case : Union[str, Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
```
code_codestyle: 204
style_context:

```python
'''simple docstring'''
import os
import sys
import unittest


_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")


class _UpperCAmelCase ( unittest.TestCase ):
    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        __lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        __lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        __lowerCAmelCase = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )

    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        __lowerCAmelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
        self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
        self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
        self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""UNet2DModel""",objects["""torch"""] )
        self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
        self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
        self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
        self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
        self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )

    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        __lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
        self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )

        __lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
        self.assertEqual(
            __SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )

        __lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        __lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
        self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )

    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        __lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, [\"torch\"])


class FakeClass(metaclass=DummyObject):
    _backends = [\"torch\"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, [\"torch\"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, [\"torch\"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, [\"torch\"])
"""
        __lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
```
style_context_codestyle: 689
label: 0
Row 4

code:

```python
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def __snake_case ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
    '''simple docstring'''
    lowercase = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )

    # load LoRA weight from .safetensors
    lowercase = load_file(__magic_name__ )

    lowercase = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            lowercase = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
            lowercase = pipeline.text_encoder
        else:
            lowercase = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
            lowercase = pipeline.unet

        # find the target layer
        lowercase = layer_infos.pop(0 )
        while len(__magic_name__ ) > -1:
            try:
                lowercase = curr_layer.__getattr__(__magic_name__ )
                if len(__magic_name__ ) > 0:
                    lowercase = layer_infos.pop(0 )
                elif len(__magic_name__ ) == 0:
                    break
            except Exception:
                if len(__magic_name__ ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    lowercase = layer_infos.pop(0 )

        lowercase = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down" , "lora_up" ) )
            pair_keys.append(__magic_name__ )
        else:
            pair_keys.append(__magic_name__ )
            pair_keys.append(key.replace("lora_up" , "lora_down" ) )

        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            lowercase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            lowercase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
        else:
            lowercase = state_dict[pair_keys[0]].to(torch.floataa )
            lowercase = state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )

        # update visited list
        for item in pair_keys:
            visited.append(__magic_name__ )

    return pipeline


if __name__ == "__main__":
    _snake_case : Union[str, Any] = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    _snake_case : Optional[int] = parser.parse_args()

    _snake_case : Dict = args.base_model_path
    _snake_case : Optional[Any] = args.checkpoint_path
    _snake_case : Union[str, Any] = args.dump_path
    _snake_case : Optional[int] = args.lora_prefix_unet
    _snake_case : int = args.lora_prefix_text_encoder
    _snake_case : str = args.alpha

    _snake_case : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    _snake_case : Tuple = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
```
code_codestyle: 441
style_context:

```python
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
    try:
        __lowerCAmelCase = float(lowercase )
    except ValueError:
        raise ValueError("""Please enter a valid number""" )
    __lowerCAmelCase = decimal - int(lowercase )
    if fractional_part == 0:
        return int(lowercase ), 1
    else:
        __lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
        __lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
        __lowerCAmelCase = 10**number_of_frac_digits
        __lowerCAmelCase , __lowerCAmelCase = denominator, numerator
        while True:
            __lowerCAmelCase = dividend % divisor
            if remainder == 0:
                break
            __lowerCAmelCase , __lowerCAmelCase = divisor, remainder
        __lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
        return int(lowercase ), int(lowercase )


if __name__ == "__main__":
    print(f'{decimal_to_fraction(2) = }')
    print(f'{decimal_to_fraction(89.0) = }')
    print(f'{decimal_to_fraction("67") = }')
    print(f'{decimal_to_fraction("45.0") = }')
    print(f'{decimal_to_fraction(1.5) = }')
    print(f'{decimal_to_fraction("6.25") = }')
    print(f'{decimal_to_fraction("78td") = }')
```
style_context_codestyle: 689
label: 0
Row 5

code:

```python
'''simple docstring'''
import numpy as np


class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self ):
        """simple docstring"""
        a_ = (0, 0)
        a_ = None
        a_ = 0
        a_ = 0
        a_ = 0

    def __eq__( self , UpperCamelCase__ ):
        """simple docstring"""
        return self.position == cell.position

    def _a ( self ):
        """simple docstring"""
        print(self.position )


class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self , UpperCamelCase__=(5, 5) ):
        """simple docstring"""
        a_ = np.zeros(__SCREAMING_SNAKE_CASE )
        a_ = world_size[0]
        a_ = world_size[1]

    def _a ( self ):
        """simple docstring"""
        print(self.w )

    def _a ( self , UpperCamelCase__ ):
        """simple docstring"""
        a_ = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        a_ = cell.position[0]
        a_ = cell.position[1]
        a_ = []
        for n in neughbour_cord:
            a_ = current_x + n[0]
            a_ = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                a_ = Cell()
                a_ = (x, y)
                a_ = cell
                neighbours.append(__SCREAMING_SNAKE_CASE )
        return neighbours


def __UpperCamelCase ( lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[Any] ):
    """simple docstring"""
    a_ = []
    a_ = []
    _open.append(lowercase_ )
    while _open:
        a_ = np.argmin([n.f for n in _open] )
        a_ = _open[min_f]
        _closed.append(_open.pop(lowercase_ ) )
        if current == goal:
            break
        for n in world.get_neigbours(lowercase_ ):
            for c in _closed:
                if c == n:
                    continue
            a_ = current.g + 1
            a_ , a_ = n.position
            a_ , a_ = goal.position
            a_ = (ya - ya) ** 2 + (xa - xa) ** 2
            a_ = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(lowercase_ )
    a_ = []
    while current.parent is not None:
        path.append(current.position )
        a_ = current.parent
    path.append(current.position )
    return path[::-1]


if __name__ == "__main__":
    __lowerCAmelCase = Gridworld()
    # Start position and goal
    __lowerCAmelCase = Cell()
    __lowerCAmelCase = (0, 0)
    __lowerCAmelCase = Cell()
    __lowerCAmelCase = (4, 4)
    print(f"""path from {start.position} to {goal.position}""")
    __lowerCAmelCase = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        __lowerCAmelCase = 1
    print(world.w)
```
code_codestyle: 536
style_context:

```python
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

_a : Dict = _symbol_database.Default()

_a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
    b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" )

_a : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    _a : str = None
    _a : Union[str, Any] = b"""H\003"""
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _a : Optional[int] = 4_5
    _a : List[Any] = 1_5_8_1
    _a : str = 1_5_1_7
    _a : Optional[Any] = 1_5_7_0
    _a : List[str] = 1_5_8_4
    _a : List[Any] = 1_7_9_3
    _a : Union[str, Any] = 1_7_9_5
    _a : Tuple = 1_9_1_6
    _a : List[Any] = 1_8_6_4
    _a : Any = 1_9_0_5
    _a : Optional[Any] = 1_9_1_9
    _a : Optional[int] = 2_4_2_9
    _a : Tuple = 2_2_0_8
    _a : Optional[Any] = 2_4_1_8
    _a : List[Any] = 2_3_2_3
    _a : str = 2_4_0_7
# @@protoc_insertion_point(module_scope)
```
689
0
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return (gray > 127) & (gray <= 255) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = np.zeros_like(_UpperCamelCase ) __lowerCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __lowerCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __lowerCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __lowerCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image A : Any = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" A : Optional[int] = np.array(Image.open(lena_path)) # kernel to be applied A : Dict = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) A : Union[str, Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image A : List[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
636
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( lowerCAmelCase_ ): a : torch.FloatTensor class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = torch.nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) # down __lowerCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_down_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) self.down_blocks.append(__SCREAMING_SNAKE_CASE ) # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # out __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = 2 * out_channels if double_z else out_channels __lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = x __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(""">=""","""1.11.0""" ): for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: __lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE ) # post-process __lowerCAmelCase = 
self.conv_norm_out(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) __lowerCAmelCase = in_channels if norm_type == """spatial""" else None # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # up __lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = reversed_block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_up_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,) self.up_blocks.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output_channel # out if norm_type == "spatial": __lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' __lowerCAmelCase = z __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(""">=""","""1.11.0""" ): # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for 
up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ): '''simple docstring''' super().__init__() __lowerCAmelCase = n_e __lowerCAmelCase = vq_embed_dim __lowerCAmelCase = beta __lowerCAmelCase = legacy __lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e ) __lowerCAmelCase = remap if self.remap is not None: self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) ) __lowerCAmelCase = self.used.shape[0] __lowerCAmelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCAmelCase = self.re_embed __lowerCAmelCase = self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __lowerCAmelCase = n_e __lowerCAmelCase = sane_index_shape def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long() __lowerCAmelCase = match.argmax(-1 ) __lowerCAmelCase = match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device ) else: __lowerCAmelCase = self.unknown_index return new.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token __lowerCAmelCase = 0 # simply set to zero __lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE ) return back.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = z.permute(0,2,3,1 ).contiguous() __lowerCAmelCase = z.view(-1,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 ) __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape ) __lowerCAmelCase = None __lowerCAmelCase = None # compute loss for embedding if not self.legacy: __lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCAmelCase 
= torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowerCAmelCase = z + (z_q - z).detach() # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() if self.remap is not None: __lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis __lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten if self.sane_index_shape: __lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if self.remap is not None: __lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis __lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ) if shape is not None: __lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE ) # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() return z_q class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = parameters __lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 ) __lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 ) __lowerCAmelCase = deterministic __lowerCAmelCase = torch.exp(0.5 * self.logvar ) __lowerCAmelCase = torch.exp(self.logvar ) if self.deterministic: __lowerCAmelCase = __lowerCAmelCase = torch.zeros_like( self.mean,device=self.parameters.device,dtype=self.parameters.dtype ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' __lowerCAmelCase = randn_tensor( self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype ) __lowerCAmelCase = self.mean + self.std * sample return x def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,dim=[1, 2, 3],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __lowerCAmelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' return self.mean
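The `kl` method above implements the closed-form KL divergence of a diagonal Gaussian against the standard normal; a small numerical illustration of that formula (values are arbitrary):

import torch

mean = torch.tensor([0.5, -0.2])
logvar = torch.tensor([0.0, -1.0])
var = logvar.exp()

# KL(N(mean, var) || N(0, I)) = 0.5 * sum(mean^2 + var - 1 - logvar)
kl = 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar)
print(kl)  # small positive scalar; exactly zero only for mean=0, logvar=0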
689
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = """▁""" __snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __snake_case = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } __snake_case = { """google/pegasus-xsum""": 512, } class _lowerCAmelCase ( lowerCAmelCase_ ): __UpperCAmelCase : str = VOCAB_FILES_NAMES __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Union[str, Any] = PegasusTokenizer __UpperCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<pad>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<mask_2>" , UpperCamelCase__="<mask_1>" , UpperCamelCase__=None , UpperCamelCase__=103 , **UpperCamelCase__ , ) -> Dict: '''simple docstring''' snake_case : List[Any] = offset if additional_special_tokens is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError( F'additional_special_tokens should be of type {type(__SCREAMING_SNAKE_CASE )}, but is' F' {type(__SCREAMING_SNAKE_CASE )}' ) snake_case : List[str] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F'<unk_{i}>' for i in range(len(__SCREAMING_SNAKE_CASE ) , self.offset - 1 ) ] if len(set(__SCREAMING_SNAKE_CASE ) ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) snake_case : Any = additional_special_tokens_extended else: snake_case : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )] super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , mask_token_sent=__SCREAMING_SNAKE_CASE , offset=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) snake_case : Dict = vocab_file snake_case : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' snake_case : Union[str, Any] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' ) return [1 if x in all_special_ids else 0 for x in seq] def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> Dict: '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(__SCREAMING_SNAKE_CASE ) elif token_ids_a is None: return self._special_token_mask(__SCREAMING_SNAKE_CASE ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Dict: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return snake_case : List[str] = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
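For context, a typical way the fast tokenizer above is used (requires `transformers` and network access to the Hub; output details are indicative, not guaranteed):

from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
enc = tok("PEGASUS was pre-trained with gap-sentence generation.", return_tensors="pt")
# build_inputs_with_special_tokens above appends eos_token_id to every sequence
print(enc["input_ids"][0, -1].item() == tok.eos_token_id)  # True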
178
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features _a : Optional[int] = logging.get_logger(__name__) _a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) _a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} ) a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) a : int =field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : int =field( default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) a : int =field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) a : int =field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) a : float =field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class _UpperCAmelCase ( lowerCAmelCase_ ): a : Optional[Any] ="""train""" a : Optional[int] ="""dev""" class _UpperCAmelCase ( lowerCAmelCase_ ): a : SquadDataTrainingArguments a : List[SquadFeatures] a : Split a : bool def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",): '''simple docstring''' __lowerCAmelCase = args __lowerCAmelCase = is_language_sensitive __lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): try: __lowerCAmelCase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __lowerCAmelCase = mode # Load data features from cache or dataset file __lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1""" 
__lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowerCAmelCase = cached_features_file + """.lock""" with FileLock(__SCREAMING_SNAKE_CASE ): if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: __lowerCAmelCase = time.time() __lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __lowerCAmelCase = self.old_features["""features"""] __lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' """ future run""" ) else: if mode == Split.dev: __lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) else: __lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) __lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features( examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = self.features[i] __lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float ) __lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float ) __lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
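The dataset above guards its feature cache with a `FileLock` so only the first process builds the features and the others wait and read; a stripped-down sketch of that idiom (`build_features` is a placeholder callable):

import os

import torch
from filelock import FileLock


def load_or_build(cached_file, build_features, overwrite=False):
    with FileLock(cached_file + ".lock"):  # first process builds, the rest wait and read
        if os.path.exists(cached_file) and not overwrite:
            return torch.load(cached_file)
        features = build_features()
        torch.save(features, cached_file)
        return features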
689
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: A__ = None A__ = logging.get_logger(__name__) A__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A__ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } A__ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } A__ = """▁""" class a ( lowerCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : str = AlbertTokenizer def __init__( self :Dict ,__lowercase :Dict=None ,__lowercase :Any=None ,__lowercase :List[str]=True ,__lowercase :Optional[Any]=True ,__lowercase :List[Any]=False ,__lowercase :Any="[CLS]" ,__lowercase :Any="[SEP]" ,__lowercase :Dict="<unk>" ,__lowercase :str="[SEP]" ,__lowercase :Dict="<pad>" ,__lowercase :Dict="[CLS]" ,__lowercase :Optional[Any]="[MASK]" ,**__lowercase :List[Any] ,): snake_case__ : Any = ( AddedToken(__SCREAMING_SNAKE_CASE ,lstrip=__SCREAMING_SNAKE_CASE ,rstrip=__SCREAMING_SNAKE_CASE ,normalized=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) else mask_token ) super().__init__( __SCREAMING_SNAKE_CASE ,tokenizer_file=__SCREAMING_SNAKE_CASE ,do_lower_case=__SCREAMING_SNAKE_CASE ,remove_space=__SCREAMING_SNAKE_CASE ,keep_accents=__SCREAMING_SNAKE_CASE ,bos_token=__SCREAMING_SNAKE_CASE ,eos_token=__SCREAMING_SNAKE_CASE ,unk_token=__SCREAMING_SNAKE_CASE ,sep_token=__SCREAMING_SNAKE_CASE 
,pad_token=__SCREAMING_SNAKE_CASE ,cls_token=__SCREAMING_SNAKE_CASE ,mask_token=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,) snake_case__ : int = do_lower_case snake_case__ : Dict = remove_space snake_case__ : Any = keep_accents snake_case__ : Optional[Any] = vocab_file snake_case__ : Union[str, Any] = False if not self.vocab_file else True def __lowerCamelCase ( self :List[str] ,__lowercase :Any ,__lowercase :str = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :Dict = None ): snake_case__ : Optional[int] = [self.sep_token_id] snake_case__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self :Optional[int] ,__lowercase :Union[str, Any] ,__lowercase :int = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case__ : Union[str, Any] = os.path.join( __SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file ,__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
252
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _lowerCAmelCase ( lowercase ) -> Optional[Any]: # vision encoder if "img_encoder.pos_embed" in name: __lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: __lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: __lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: __lowerCAmelCase = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: __lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: __lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: __lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: __lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: __lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: __lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: __lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: __lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: __lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Dict: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] ) __lowerCAmelCase = config.vision_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase = int(key_split[3] ) __lowerCAmelCase = config.text_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[ dim : dim * 2, : ] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] else: __lowerCAmelCase = rename_key(lowercase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): __lowerCAmelCase = val.squeeze_() else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]: __lowerCAmelCase = GroupViTConfig() __lowerCAmelCase = GroupViTModel(lowercase ).eval() __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0) # verify result __lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) __lowerCAmelCase = prepare_img() __lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" ) with torch.no_grad(): __lowerCAmelCase = model(**lowercase ) if model_name == "groupvit-gcc-yfcc": __lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": __lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f'Model name {model_name} not supported.' 
) assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 ) processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) print("""Successfully saved processor and model to""" , lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowercase , organization="""nielsr""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _a : List[str] = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
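`convert_state_dict` above slices fused query/key/value projections into three equal blocks; a toy illustration of that split:

import torch

dim = 4  # stands in for config.vision_config.hidden_size
fused = torch.randn(3 * dim, dim)  # a fused "qkv" in-projection weight

q_w = fused[:dim, :]
k_w = fused[dim : dim * 2, :]
v_w = fused[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)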
689
0
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _a: int = logging.get_logger(__name__) _a: Optional[int] = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __UpperCamelCase ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE__ = """gptj""" SCREAMING_SNAKE_CASE__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , lowerCAmelCase : Dict=50_400 , lowerCAmelCase : str=2_048 , lowerCAmelCase : Dict=4_096 , lowerCAmelCase : Any=28 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[int]="gelu_new" , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Any=0.0 , lowerCAmelCase : Optional[Any]=1e-5 , lowerCAmelCase : int=0.02 , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]=50_256 , lowerCAmelCase : Union[str, Any]=50_256 , lowerCAmelCase : List[str]=False , **lowerCAmelCase : str , ): '''simple docstring''' UpperCAmelCase_ = vocab_size UpperCAmelCase_ = n_positions UpperCAmelCase_ = n_embd UpperCAmelCase_ = n_layer UpperCAmelCase_ = n_head UpperCAmelCase_ = n_inner UpperCAmelCase_ = rotary_dim UpperCAmelCase_ = activation_function UpperCAmelCase_ = resid_pdrop UpperCAmelCase_ = embd_pdrop UpperCAmelCase_ = attn_pdrop UpperCAmelCase_ = layer_norm_epsilon UpperCAmelCase_ = initializer_range UpperCAmelCase_ = use_cache UpperCAmelCase_ = bos_token_id UpperCAmelCase_ = eos_token_id super().__init__( bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] = "default" , lowerCAmelCase : List[Any] = None , lowerCAmelCase : str = False , ): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config , "pad_token_id" , __SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
UpperCAmelCase_ = 0 @property def __A ( self : Tuple ): '''simple docstring''' UpperCAmelCase_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction="inputs" ) UpperCAmelCase_ = {0: "batch", 1: "past_sequence + sequence"} else: UpperCAmelCase_ = {0: "batch", 1: "sequence"} return common_inputs @property def __A ( self : Union[str, Any] ): '''simple docstring''' return self._config.n_layer @property def __A ( self : Union[str, Any] ): '''simple docstring''' return self._config.n_head def __A ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : List[Any] = -1 , lowerCAmelCase : List[Any] = -1 , lowerCAmelCase : List[str] = False , lowerCAmelCase : Tuple = None , ): '''simple docstring''' UpperCAmelCase_ = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() UpperCAmelCase_ = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCAmelCase_ = seqlen + 2 UpperCAmelCase_ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase_ = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] UpperCAmelCase_ = common_inputs["attention_mask"] if self.use_past: UpperCAmelCase_ = ordered_inputs["attention_mask"].dtype UpperCAmelCase_ = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __A ( self : Dict ): '''simple docstring''' return 13
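The dummy `past_key_values` built above have shape `(batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads)` per layer; with the GPT-J defaults shown (n_embd=4096, n_head=16, n_layer=28) that works out to:

import torch

batch, seqlen = 2, 8
num_heads, hidden_size, n_layer = 16, 4096, 28
past_shape = (batch, num_heads, seqlen + 2, hidden_size // num_heads)  # head dim = 256

past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
print(len(past_key_values), past_key_values[0][0].shape)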
162
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) _a : Optional[int] = ["""model.decoder.embed_positions.weights"""] def _lowerCAmelCase ( lowercase ) -> Optional[Any]: if "emb" in name: __lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: __lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: __lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: __lowerCAmelCase = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: __lowerCAmelCase = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: __lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: __lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: __lowerCAmelCase = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]: __lowerCAmelCase = list(state_dict.keys() ) __lowerCAmelCase = {} for key in keys: __lowerCAmelCase = state_dict.pop(lowercase ) __lowerCAmelCase = rename_keys(lowercase ) if "in_proj_weight" in key: # split fused qkv proj __lowerCAmelCase = val[:hidden_size, :] __lowerCAmelCase = val[hidden_size : 2 * hidden_size, :] __lowerCAmelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCAmelCase = val else: __lowerCAmelCase = val return state_dict, enc_dec_proj_state_dict def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values __lowerCAmelCase = 1024 __lowerCAmelCase = 24 __lowerCAmelCase = 16 elif checkpoint == "medium": __lowerCAmelCase = 1536 __lowerCAmelCase = 48 __lowerCAmelCase = 24 elif checkpoint == "large": __lowerCAmelCase = 2048 __lowerCAmelCase = 48 __lowerCAmelCase = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) __lowerCAmelCase = MusicgenDecoderConfig( hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , ) return config @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]: __lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase ) __lowerCAmelCase = decoder_config_from_checkpoint(lowercase ) __lowerCAmelCase = fairseq_model.lm.state_dict() __lowerCAmelCase , __lowerCAmelCase = rename_state_dict( lowercase , hidden_size=decoder_config.hidden_size ) __lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" ) __lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) __lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase ) if len(lowercase ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model __lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase ) # check we can do a forward pass __lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCAmelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor __lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" ) __lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) __lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase ) # set the appropriate bos/pad token ids __lowerCAmelCase = 2048 __lowerCAmelCase = 2048 # set other default generation config params __lowerCAmelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCAmelCase = True __lowerCAmelCase = 3.0 if pytorch_dump_folder is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase ) processor.push_to_hub(lowercase ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) _a : List[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
689
0
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _UpperCamelCase = logging.get_logger(__name__) def _lowercase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __A : int = set() __A : int = [] def parse_line(SCREAMING_SNAKE_CASE ): for line in fp: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __A : Any = line.decode("UTF-8" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" " ): # process a single warning and move it to `selected_warnings`. if len(SCREAMING_SNAKE_CASE ) > 0: __A : List[str] = "\n".join(SCREAMING_SNAKE_CASE ) # Only keep the warnings specified in `targets` if any(f": {x}: " in warning for x in targets ): selected_warnings.add(SCREAMING_SNAKE_CASE ) buffer.clear() continue else: __A : Optional[int] = line.strip() buffer.append(SCREAMING_SNAKE_CASE ) if from_gh: for filename in os.listdir(SCREAMING_SNAKE_CASE ): __A : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): # read the file if filename != "warnings.txt": continue with open(SCREAMING_SNAKE_CASE ) as fp: parse_line(SCREAMING_SNAKE_CASE ) else: try: with zipfile.ZipFile(SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE ): # read the file if filename != "warnings.txt": continue with z.open(SCREAMING_SNAKE_CASE ) as fp: parse_line(SCREAMING_SNAKE_CASE ) except Exception: logger.warning( f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def _lowercase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __A : int = set() __A : List[str] = [os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for p in os.listdir(SCREAMING_SNAKE_CASE ) if (p.endswith(".zip" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) return selected_warnings if __name__ == "__main__": def _lowercase (SCREAMING_SNAKE_CASE ): '''simple docstring''' return values.split("," ) _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _UpperCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: 
json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _UpperCamelCase = extract_warnings(args.output_dir, args.targets) _UpperCamelCase = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
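An in-memory illustration of the buffering scheme `parse_line` uses above: indented lines extend the current warning, and a non-indented line closes it and checks it against the targets:

sample = [
    "tests/test_x.py::test_a",
    "  /usr/lib/python3/foo.py:1: DeprecationWarning: old API",
    "    warnings.warn('old API')",
    "done",
]
buffer, selected = [], set()
for line in sample:
    if not line.startswith(" "):
        if buffer:
            warning = "\n".join(buffer)
            if ": DeprecationWarning: " in warning:  # mirrors the f": {x}: " target check
                selected.add(warning)
            buffer.clear()
    else:
        buffer.append(line.strip())
print(selected)  # contains the joined two-line DeprecationWarning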
111
from collections import deque


def tarjan(g):
    """Return the strongly connected components of a directed graph (adjacency list)."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n_vertices, edges):
    """Build an adjacency list from a list of (u, v) edges."""
    g = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
689
0
'''simple docstring'''
from math import isqrt, loga


def __UpperCAmelCase (lowercase__ ) -> list[int]:
    '''simple docstring'''
    a_ = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , lowercase__ , lowercase__ ):
                a_ = False

    return [i for i in range(2 , lowercase__ ) if is_prime[i]]


def __UpperCAmelCase (lowercase__ = 800800 , lowercase__ = 800800 ) -> int:
    '''simple docstring'''
    a_ = degree * loga(lowercase__ )
    a_ = int(lowercase__ )
    a_ = calculate_prime_numbers(lowercase__ )

    a_ = 0
    a_ = 0
    a_ = len(lowercase__ ) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left] )
            + prime_numbers[left] * loga(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(F'{solution() = }')
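A minimal numeric sketch of the log-based bound the two-pointer loop above relies on; the `log2` import and the `(p, q)` pair are illustrative assumptions, and the listing's `loga` placeholder stands in for `log2`:

# p**q * q**p <= 800800**800800  holds iff  q*log2(p) + p*log2(q) <= 800800*log2(800800)
from math import log2

p, q = 2, 5153  # a hypothetical prime pair to test
upper_bound = 800800 * log2(800800)
print(q * log2(p) + p * log2(q) <= upper_bound)  # True -> (p, q) counts as a hybrid pair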
685
'''simple docstring'''
from argparse import ArgumentParser

from .env import EnvironmentCommand


def _lowerCAmelCase ( ) -> Union[str, Any]:
    __lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    __lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )

    # Register commands
    EnvironmentCommand.register_subcommand(lowercase )

    # Let's go
    __lowerCAmelCase = parser.parse_args()

    if not hasattr(lowercase , """func""" ):
        parser.print_help()
        exit(1 )

    # Run
    __lowerCAmelCase = args.func(lowercase )
    service.run()


if __name__ == "__main__":
    main()
689
0
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    lowercase_ : List[Any] = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching , "os.path.join" , _UpperCamelCase ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    assert _test_patching.open is open

    lowercase_ : str = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , "open" , _UpperCamelCase ):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    lowercase_ : List[str] = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching , "pandas.read_csv" , _UpperCamelCase ):
        pass


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    lowercase_ : Any = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , "len" , _UpperCamelCase ) is None
    with patch_submodule(_test_patching , "len" , _UpperCamelCase ):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    lowercase_ : Optional[int] = "__test_patch_submodule_start_and_stop_mock__"
    lowercase_ : Optional[int] = patch_submodule(_test_patching , "open" , _UpperCamelCase )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    lowercase_ : List[str] = "__test_patch_submodule_successive_join__"
    lowercase_ : List[str] = "__test_patch_submodule_successive_dirname__"
    lowercase_ : Any = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , "os.path.join" , _UpperCamelCase ):
        with patch_submodule(_test_patching , "os.rename" , _UpperCamelCase ):
            with patch_submodule(_test_patching , "os.path.dirname" , _UpperCamelCase ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , "os.rename" , _UpperCamelCase ):
        with patch_submodule(_test_patching , "os.path.join" , _UpperCamelCase ):
            with patch_submodule(_test_patching , "os.path.dirname" , _UpperCamelCase ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def __SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    lowercase_ : Dict = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , _UpperCamelCase ):
        pass
    with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , _UpperCamelCase ):
        pass
620
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _a : List[Any] = logging.get_logger(__name__) _a : int = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } _a : Any = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: for attribute in key.split(""".""" ): __lowerCAmelCase = getattr(lowercase , lowercase ) if weight_type is not None: __lowerCAmelCase = getattr(lowercase , lowercase ).shape else: __lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __lowerCAmelCase = value elif weight_type == "weight_g": __lowerCAmelCase = value elif weight_type == "weight_v": __lowerCAmelCase = value elif weight_type == "bias": __lowerCAmelCase = value else: __lowerCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: __lowerCAmelCase = [] __lowerCAmelCase = fairseq_model.state_dict() __lowerCAmelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue __lowerCAmelCase = True if "*" in mapped_key: __lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2] __lowerCAmelCase = mapped_key.replace("""*""" , lowercase ) if "weight_g" in name: __lowerCAmelCase = """weight_g""" elif "weight_v" in name: __lowerCAmelCase = """weight_v""" elif "bias" in name: __lowerCAmelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase = """weight""" else: __lowerCAmelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = full_name.split("""conv_layers.""" )[-1] __lowerCAmelCase = name.split(""".""" ) __lowerCAmelCase = int(items[0] ) __lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowercase ) @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict: if config_path is not None: __lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase ) else: __lowerCAmelCase = UniSpeechSatConfig() __lowerCAmelCase = """""" if is_finetuned: __lowerCAmelCase = UniSpeechSatForCTC(lowercase ) else: __lowerCAmelCase = UniSpeechSatForPreTraining(lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) __lowerCAmelCase = model[0].eval() recursively_load_weights(lowercase , lowercase ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _a : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _a : Union[str, Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
689
0
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )

    a_ : Tuple = str(bin(UpperCamelCase__ ) )[2:]  # remove the leading "0b"
    a_ : List[Any] = str(bin(UpperCamelCase__ ) )[2:]  # remove the leading "0b"

    a_ : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )

    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
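The style-transfer pass above collapses the locals into repeated placeholder names, so the listing cannot run as printed. A de-obfuscated rendering (the name `binary_xor` and the parameter names are assumptions, not part of the original sample) behaves like this:

def binary_xor(a: int, b: int) -> str:
    # illustrative, de-obfuscated rendering of the listing above
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    # zfill both operands to the same width, then XOR digit by digit
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )

print(binary_xor(25, 32))  # -> '0b111001'  (011001 XOR 100000)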
442
'''simple docstring'''
from scipy.stats import spearmanr

import datasets


_a : str = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets.
Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative
correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic
relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally
distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a
Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not
entirely reliable but are probably reasonable for datasets larger than 500 or so.
"""

_a : Dict = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score.
        Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                     predictions=[10, 9, 2.5, 6, 4],
        ...                                     return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_a : List[str] = r"""\
@book{kokoska2000crc,
   title={CRC standard probability and statistics tables and formulae},
   author={Kokoska, Stephen and Zwillinger, Daniel},
   year={2000},
   publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float""" ),
                    """references""": datasets.Value("""float""" ),
                } ),
            reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],
        )

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
        '''simple docstring'''
        __lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )

        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
689
0
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def UpperCamelCase ( ):
    snake_case : str = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=__lowerCamelCase )
    snake_case : List[Any] = parser.add_subparsers(help="accelerate command helpers" )

    # Register commands
    get_config_parser(subparsers=__lowerCamelCase )
    env_command_parser(subparsers=__lowerCamelCase )
    launch_command_parser(subparsers=__lowerCamelCase )
    tpu_command_parser(subparsers=__lowerCamelCase )
    test_command_parser(subparsers=__lowerCamelCase )

    # Let's go
    snake_case : Dict = parser.parse_args()

    if not hasattr(__lowerCamelCase , "func" ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(__lowerCamelCase )


if __name__ == "__main__":
    main()
204
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
    a : List[str] =["""onnx"""]

    def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        requires_backends(self,["""onnx"""] )

    @classmethod
    def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        requires_backends(cls,["""onnx"""] )

    @classmethod
    def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        requires_backends(cls,["""onnx"""] )
689
0
import sys

_snake_case : Any = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def __snake_case ( __magic_name__ ):
    '''simple docstring'''
    lowercase = 1
    for digit in s:
        product *= int(__magic_name__ )
    return product


def __snake_case ( __magic_name__ = N ):
    '''simple docstring'''
    lowercase = -sys.maxsize - 1
    lowercase = n[:13]
    lowercase = 13
    while cur_index < len(__magic_name__ ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            lowercase = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            lowercase = max(__magic_name__ , str_eval(__magic_name__ ) )
            lowercase = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(F"{solution() = }")
441
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _a : int = logging.get_logger(__name__) _a : Optional[int] = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _UpperCAmelCase ( lowerCAmelCase_ ): a : List[str] ="""gptj""" a : Optional[int] ={ """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_embd __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = rotary_dim __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = use_cache __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__( bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
__lowerCAmelCase = 0 @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" ) __lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowerCAmelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_layer @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_head def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,): '''simple docstring''' __lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() __lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowerCAmelCase = seqlen + 2 __lowerCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCAmelCase = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] __lowerCAmelCase = common_inputs["""attention_mask"""] if self.use_past: __lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype __lowerCAmelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 ) return ordered_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return 13
689
0
'''simple docstring'''
from collections import deque


def __UpperCamelCase ( lowercase_ : Any ):
    """simple docstring"""
    a_ = len(lowercase_ )
    a_ = deque()
    a_ = [False for _ in range(lowercase_ )]
    a_ = [-1 for _ in range(lowercase_ )]
    a_ = index_of[:]

    def strong_connect(lowercase_ : int , lowercase_ : Any , lowercase_ : str ):
        a_ = index  # the number when this node is seen
        a_ = index  # lowest rank node reachable from here
        index += 1
        stack.append(lowercase_ )
        a_ = True

        for w in g[v]:
            if index_of[w] == -1:
                a_ = strong_connect(lowercase_ , lowercase_ , lowercase_ )
                a_ = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                a_ = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            a_ = []
            a_ = stack.pop()
            a_ = False
            component.append(lowercase_ )
            while w != v:
                a_ = stack.pop()
                a_ = False
                component.append(lowercase_ )
            components.append(lowercase_ )
        return index

    a_ = []
    for v in range(lowercase_ ):
        if index_of[v] == -1:
            strong_connect(lowercase_ , 0 , lowercase_ )

    return components


def __UpperCamelCase ( lowercase_ : str , lowercase_ : Dict ):
    """simple docstring"""
    a_ = [[] for _ in range(lowercase_ )]
    for u, v in edges:
        g[u].append(lowercase_ )
    return g


if __name__ == "__main__":
    # Test
    __lowerCAmelCase = 7
    __lowerCAmelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    __lowerCAmelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    __lowerCAmelCase = [(u, v) for u, v in zip(source, target)]
    __lowerCAmelCase = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
536
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int:
    __lowerCAmelCase = set()
    __lowerCAmelCase = int((limit - 24) ** (1 / 2) )

    __lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) )

    for primea in primes:
        __lowerCAmelCase = primea * primea
        for primea in primes:
            __lowerCAmelCase = primea * primea * primea
            if square + cube >= limit - 16:
                break
            for primea in primes:
                __lowerCAmelCase = primea * primea * primea * primea
                __lowerCAmelCase = square + cube + tetr
                if total >= limit:
                    break
                ret.add(lowercase )

    return len(lowercase )


if __name__ == "__main__":
    print(f'{solution() = }')
689
0
"""simple docstring""" import copy import random from transformers import CLIPTokenizer class _UpperCamelCase ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self , *__a , **__a ): super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = {} def snake_case ( self , __a , *__a , **__a ): __lowerCAmelCase = super().add_tokens(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." ) def snake_case ( self , __a , *__a , __a=1 , **__a ): __lowerCAmelCase = [] if num_vec_per_token == 1: self.try_adding_tokens(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) output.append(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = [] for i in range(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = placeholder_token + f"_{i}" self.try_adding_tokens(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) output.append(__SCREAMING_SNAKE_CASE ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f"The tokenizer already has placeholder token {token} that can get confused with" f" {placeholder_token}keep placeholder tokens independent" ) __lowerCAmelCase = output def snake_case ( self , __a , __a=False , __a=1.0 ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [] for i in range(len(__SCREAMING_SNAKE_CASE ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__SCREAMING_SNAKE_CASE ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: __lowerCAmelCase = self.token_map[placeholder_token] __lowerCAmelCase = tokens[: 1 + int(len(__SCREAMING_SNAKE_CASE ) * prop_tokens_to_load )] if vector_shuffle: __lowerCAmelCase = copy.copy(__SCREAMING_SNAKE_CASE ) random.shuffle(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = text.replace(__SCREAMING_SNAKE_CASE , " ".join(__SCREAMING_SNAKE_CASE ) ) return text def __call__( self , __a , *__a , __a=False , __a=1.0 , **__a ): return super().__call__( self.replace_placeholder_tokens_in_text( __SCREAMING_SNAKE_CASE , vector_shuffle=__SCREAMING_SNAKE_CASE , prop_tokens_to_load=__SCREAMING_SNAKE_CASE ) , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) def snake_case ( self , __a , *__a , __a=False , __a=1.0 , **__a ): return super().encode( self.replace_placeholder_tokens_in_text( __SCREAMING_SNAKE_CASE , vector_shuffle=__SCREAMING_SNAKE_CASE , prop_tokens_to_load=__SCREAMING_SNAKE_CASE ) , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
636
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : Optional[int] =TextToVideoSDPipeline a : Optional[int] =TEXT_TO_IMAGE_PARAMS a : Any =TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. a : Union[str, Any] =frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,) __lowerCAmelCase = DDIMScheduler( beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,) __lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = """np""" __lowerCAmelCase = 
sd_pipe(**__SCREAMING_SNAKE_CASE ).frames __lowerCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",) def lowerCamelCase__ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass def lowerCamelCase__ ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
689
0
"""simple docstring""" def __lowerCAmelCase ( lowercase : str , lowercase : int ) -> list[int]: """simple docstring""" snake_case : str = int(lowercase ) # Initialize Result snake_case : Dict = [] # Traverse through all denomination for denomination in reversed(lowercase ): # Find denominations while int(lowercase ) >= int(lowercase ): total_value -= int(lowercase ) answer.append(lowercase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __snake_case = [] __snake_case = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): __snake_case = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) __snake_case = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter __snake_case = [1, 2, 5, 10, 20, 50, 100, 500, 2000] __snake_case = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F'''Following is minimal change for {value}: ''') __snake_case = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
178
'''simple docstring'''
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def _lowerCAmelCase ( lowercase ) -> Optional[int]:
    if not is_accelerate_available():
        return method
    __lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
    if version.parse(lowercase ) < version.parse("""0.17.0""" ):
        return method

    def wrapper(self , *lowercase , **lowercase ):
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self , *lowercase , **lowercase )

    return wrapper
689
0
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class a ( lowerCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[int] = TextToVideoSDPipeline __lowerCAmelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS __lowerCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowerCAmelCase : Union[str, Any] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def __lowerCamelCase ( self :Union[str, Any] ): torch.manual_seed(0 ) snake_case__ : str = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,) snake_case__ : Optional[Any] = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=__SCREAMING_SNAKE_CASE ,set_alpha_to_one=__SCREAMING_SNAKE_CASE ,) torch.manual_seed(0 ) snake_case__ : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,) snake_case__ : str = CLIPTextModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case__ : Dict = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def __lowerCamelCase ( self :Tuple ,__lowercase :str ,__lowercase :str=0 ): if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): snake_case__ : str = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: snake_case__ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def __lowerCamelCase ( self :Any ): snake_case__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case__ : str = self.get_dummy_components() snake_case__ : str = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : str = 
self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) snake_case__ : int = '''np''' snake_case__ : str = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames snake_case__ : str = frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) snake_case__ : str = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCamelCase ( self :List[Any] ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE ,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def __lowerCamelCase ( self :Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE ,expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def __lowerCamelCase ( self :Any ): pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def __lowerCamelCase ( self :Optional[Any] ): pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def __lowerCamelCase ( self :List[Any] ): pass def __lowerCamelCase ( self :List[Any] ): return super().test_progress_bar() @slow @skip_mps class a ( unittest.TestCase ): def __lowerCamelCase ( self :Optional[int] ): snake_case__ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) snake_case__ : List[Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) snake_case__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) snake_case__ : Tuple = pipe.to('''cuda''' ) snake_case__ : Dict = '''Spiderman is surfing''' snake_case__ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) snake_case__ : Dict = pipe(__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=2_5 ,output_type='''pt''' ).frames snake_case__ : Tuple = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def __lowerCamelCase ( self :Optional[int] ): snake_case__ : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) snake_case__ : List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) snake_case__ : Union[str, Any] = pipe.to('''cuda''' ) snake_case__ : Optional[Any] = '''Spiderman is surfing''' snake_case__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 ) snake_case__ : str = pipe(__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,output_type='''pt''' ).frames snake_case__ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
252
'''simple docstring'''
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
    # load base model
    __lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )

    # load LoRA weight from .safetensors
    __lowerCAmelCase = load_file(lowercase )

    __lowerCAmelCase = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
            __lowerCAmelCase = pipeline.text_encoder
        else:
            __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
            __lowerCAmelCase = pipeline.unet

        # find the target layer
        __lowerCAmelCase = layer_infos.pop(0 )
        while len(lowercase ) > -1:
            try:
                __lowerCAmelCase = curr_layer.__getattr__(lowercase )
                if len(lowercase ) > 0:
                    __lowerCAmelCase = layer_infos.pop(0 )
                elif len(lowercase ) == 0:
                    break
            except Exception:
                if len(lowercase ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    __lowerCAmelCase = layer_infos.pop(0 )

        __lowerCAmelCase = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(lowercase )
        else:
            pair_keys.append(lowercase )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )

        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            __lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            __lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
        else:
            __lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
            __lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )

        # update visited list
        for item in pair_keys:
            visited.append(lowercase )

    return pipeline


if __name__ == "__main__":
    _a : Union[str, Any] = argparse.ArgumentParser()

    parser.add_argument(
        """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
    )
    parser.add_argument(
        """--lora_prefix_text_encoder""",
        default="""lora_te""",
        type=str,
        help="""The prefix of text encoder weight in safetensors""",
    )
    parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
    parser.add_argument(
        """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
    )
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")

    _a : Optional[int] = parser.parse_args()

    _a : Dict = args.base_model_path
    _a : Optional[Any] = args.checkpoint_path
    _a : Union[str, Any] = args.dump_path
    _a : Optional[int] = args.lora_prefix_unet
    _a : int = args.lora_prefix_text_encoder
    _a : str = args.alpha

    _a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    _a : Tuple = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
689
0
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def __lowerCAmelCase ( A ):
    if not is_accelerate_available():
        return method
    UpperCAmelCase_ = version.parse(accelerate.__version__ ).base_version
    if version.parse(A ) < version.parse("0.17.0" ):
        return method

    def wrapper(self , *A , **A ):
        if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
            self._hf_hook.pre_forward(self )
        return method(self , *A , **A )

    return wrapper
162
'''simple docstring'''
from collections import Counter
from timeit import timeit


def _lowerCAmelCase ( lowercase = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2


def _lowerCAmelCase ( lowercase = "" ) -> bool:
    if len(lowercase ) == 0:
        return True
    __lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    __lowerCAmelCase = {}
    for character in lower_case_input_str:
        __lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1

    __lowerCAmelCase = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1

    if odd_char > 1:
        return False
    return True


def _lowerCAmelCase ( lowercase = "" ) -> None:
    print("""\nFor string = """ , lowercase , """:""" )
    print(
        """> can_string_be_rearranged_as_palindrome_counter()""" ,
        """\tans =""" ,
        can_string_be_rearranged_as_palindrome_counter(lowercase ) ,
        """\ttime =""" ,
        timeit(
            """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" ,
            setup="""import __main__ as z""" , ) ,
        """seconds""" , )
    print(
        """> can_string_be_rearranged_as_palindrome()""" ,
        """\tans =""" ,
        can_string_be_rearranged_as_palindrome(lowercase ) ,
        """\ttime =""" ,
        timeit(
            """z.can_string_be_rearranged_as_palindrome(z.check_str)""" ,
            setup="""import __main__ as z""" , ) ,
        """seconds""" , )


if __name__ == "__main__":
    _a : int = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip()
    benchmark(check_str)
    _a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
689
0
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 _UpperCamelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _UpperCamelCase = get_tests_dir("""fixtures/vocab.json""") _UpperCamelCase = get_tests_dir("""fixtures""") class __magic_name__ ( unittest.TestCase ): """simple docstring""" lowerCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] def lowerCAmelCase__ ( self ): '''simple docstring''' __A : Optional[Any] = 0 def lowerCAmelCase__ ( self ): '''simple docstring''' __A : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : Optional[int] = WavaVecaConfig() __A : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __A : Union[str, Any] = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) copyfile(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , "vocab.json" ) ) __A : Optional[int] = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : Union[str, Any] = WavaVecaFeatureExtractor() __A : int = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) __A : Dict = WavaVecaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(__SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , "r" ) as f: __A : str = json.load(__SCREAMING_SNAKE_CASE ) config_dict.pop("processor_class" ) with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , "w" ) as f: f.write(json.dumps(__SCREAMING_SNAKE_CASE ) ) __A : Optional[int] = 
AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : List[Any] = WavaVecaFeatureExtractor() __A : Optional[Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) __A : Any = WavaVecaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(__SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , "r" ) as f: __A : List[Any] = json.load(__SCREAMING_SNAKE_CASE ) config_dict.pop("processor_class" ) with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , "w" ) as f: f.write(json.dumps(__SCREAMING_SNAKE_CASE ) ) __A : Optional[Any] = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : Union[str, Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" ) model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , "vocab.json" ) ) # create emtpy sample processor with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , "w" ) as f: f.write("{}" ) __A : List[str] = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self ): '''simple docstring''' with self.assertRaises(__SCREAMING_SNAKE_CASE ): __A : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(__SCREAMING_SNAKE_CASE ): __A : Dict = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE ) __A : Tuple = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) __A : Union[str, Any] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) __A : Optional[Any] = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version __A : Dict = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE ) __A : Optional[Any] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) def lowerCAmelCase__ ( self ): '''simple docstring''' try: AutoConfig.register("custom" , __SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) AutoTokenizer.register(__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE ) AutoProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API __A : str = CustomFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: __A : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , "vocab.txt" ) with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __A : Tuple = CustomTokenizer(__SCREAMING_SNAKE_CASE ) __A : List[str] = CustomProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __A : str = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self ): '''simple docstring''' class __magic_name__ ( lowerCAmelCase_ ): """simple docstring""" lowerCamelCase__ = False class __magic_name__ ( lowerCAmelCase_ ): """simple docstring""" lowerCamelCase__ = False class __magic_name__ ( lowerCAmelCase_ ): """simple docstring""" lowerCamelCase__ = """AutoFeatureExtractor""" lowerCamelCase__ = """AutoTokenizer""" lowerCamelCase__ = False try: AutoConfig.register("custom" , __SCREAMING_SNAKE_CASE ) 
AutoFeatureExtractor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) AutoTokenizer.register(__SCREAMING_SNAKE_CASE , slow_tokenizer_class=__SCREAMING_SNAKE_CASE ) AutoProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. __A : Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. __A : Tuple = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. __A : Optional[int] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self ): '''simple docstring''' __A : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" ) def lowerCAmelCase__ ( self ): '''simple docstring''' __A : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" ) self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" ) @is_staging_test class __magic_name__ ( unittest.TestCase ): """simple docstring""" lowerCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def lowerCAmelCase__ ( cls ): '''simple docstring''' __A : List[str] = TOKEN HfFolder.save_token(__SCREAMING_SNAKE_CASE ) @classmethod def lowerCAmelCase__ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-processor" ) except HTTPError: pass def lowerCAmelCase__ ( self ): '''simple docstring''' __A : Union[str, Any] = WavaVecaProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__SCREAMING_SNAKE_CASE , "test-processor" ) , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token ) __A : List[Any] = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" ) for k, v in 
processor.feature_extractor.__dict__.items(): self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , __SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCAmelCase__ ( self ): '''simple docstring''' __A : List[Any] = WavaVecaProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__SCREAMING_SNAKE_CASE , "test-processor-org" ) , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization="valid_org" , ) __A : str = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , __SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCAmelCase__ ( self ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() __A : List[Any] = CustomFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: __A : Any = os.path.join(__SCREAMING_SNAKE_CASE , "vocab.txt" ) with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __A : Optional[Any] = CustomTokenizer(__SCREAMING_SNAKE_CASE ) __A : Any = CustomProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"{USER}/test-dynamic-processor" , token=self._token ) __A : str = Repository(__SCREAMING_SNAKE_CASE , clone_from=f"{USER}/test-dynamic-processor" , token=self._token ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor", "AutoProcessor": "custom_processing.CustomProcessor", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(__SCREAMING_SNAKE_CASE , "tokenizer_config.json" ) ) as f: __A : Tuple = json.load(__SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config["auto_map"] , { "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None], "AutoProcessor": "custom_processing.CustomProcessor", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , "custom_feature_extraction.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , "custom_tokenization.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , "custom_processing.py" ) ) ) repo.push_to_hub() __A : Tuple = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" , trust_remote_code=__SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
111
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _lowerCAmelCase ( lowercase ) -> List[Any]: __lowerCAmelCase = VideoMAEConfig() set_architecture_configs(lowercase , lowercase ) if "finetuned" not in model_name: __lowerCAmelCase = False if "finetuned" in model_name: __lowerCAmelCase = """huggingface/label-files""" if "kinetics" in model_name: __lowerCAmelCase = 400 __lowerCAmelCase = """kinetics400-id2label.json""" elif "ssv2" in model_name: __lowerCAmelCase = 174 __lowerCAmelCase = """something-something-v2-id2label.json""" else: raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" ) __lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) ) __lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} return config def _lowerCAmelCase ( lowercase , lowercase ) -> Any: if "small" in model_name: __lowerCAmelCase = 384 __lowerCAmelCase = 1536 __lowerCAmelCase = 12 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 3 __lowerCAmelCase = 192 __lowerCAmelCase = 768 elif "large" in model_name: __lowerCAmelCase = 1024 __lowerCAmelCase = 4096 __lowerCAmelCase = 24 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 512 __lowerCAmelCase = 2048 elif "huge" in model_name: __lowerCAmelCase = 1280 __lowerCAmelCase = 5120 __lowerCAmelCase = 32 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 640 __lowerCAmelCase = 2560 elif "base" not in model_name: raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" ) def _lowerCAmelCase ( lowercase ) -> List[str]: if "encoder." 
in name: __lowerCAmelCase = name.replace("""encoder.""" , """""" ) if "cls_token" in name: __lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" ) if "decoder_pos_embed" in name: __lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" ) if "decoder.blocks" in name: __lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: __lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" ) if "attn.proj" in name: __lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "bias" not in name: __lowerCAmelCase = name.replace("""attn""" , """attention.self""" ) if "attn" in name: __lowerCAmelCase = name.replace("""attn""" , """attention.attention""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: __lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: __lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: __lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" ) if "head" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""head""" , """classifier""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if key.startswith("""encoder.""" ): __lowerCAmelCase = key.replace("""encoder.""" , """""" ) if "qkv" in key: __lowerCAmelCase = key.split(""".""" ) if key.startswith("""decoder.blocks""" ): __lowerCAmelCase = config.decoder_hidden_size __lowerCAmelCase = int(key_split[2] ) __lowerCAmelCase = """decoder.decoder_layers.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = config.hidden_size __lowerCAmelCase = int(key_split[1] ) __lowerCAmelCase = """videomae.encoder.layer.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __lowerCAmelCase = np.load(lowercase ) return list(lowercase ) def _lowerCAmelCase ( 
lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = get_videomae_config(lowercase ) if "finetuned" in model_name: __lowerCAmelCase = VideoMAEForVideoClassification(lowercase ) else: __lowerCAmelCase = VideoMAEForPreTraining(lowercase ) # download original checkpoint, hosted on Google Drive __lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(lowercase , lowercase , quiet=lowercase ) __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" ) if "model" in files: __lowerCAmelCase = files["""model"""] else: __lowerCAmelCase = files["""module"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) model.load_state_dict(lowercase ) model.eval() # verify model on basic input __lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __lowerCAmelCase = prepare_video() __lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" ) if "finetuned" not in model_name: __lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) __lowerCAmelCase = torch.load(lowercase ) __lowerCAmelCase = model(**lowercase ) __lowerCAmelCase = outputs.logits __lowerCAmelCase = [ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one __lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = 
torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'Model name not supported. Should be one of {model_names}' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 ) else: print("""Logits:""" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 ) print("""Logits ok!""" ) # verify loss, if applicable if model_name == "videomae-base-short": __lowerCAmelCase = outputs.loss assert torch.allclose(lowercase , lowercase , atol=1e-4 ) print("""Loss ok!""" ) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : int = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
689
0
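The conversion script in the record above repeatedly splits a fused `qkv` projection matrix into separate query/key/value weights, as the `val[:dim]` / `val[dim : dim * 2]` / `val[-dim:]` slicing implies. A minimal self-contained sketch of that pattern; the helper name and the shapes are assumed for illustration.

import torch

def split_qkv(qkv_weight: torch.Tensor, dim: int):
    # The source checkpoint stores one (3 * dim, dim) matrix; the target
    # model expects three separate (dim, dim) projections in q, k, v order.
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value

q, k, v = split_qkv(torch.randn(3 * 768, 768), dim=768)
assert q.shape == k.shape == v.shape == (768, 768)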
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --name-only {fork_point_sha}".split())
    .decode("utf-8")
    .split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
685
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _a : Tuple = """\ """ _a : Tuple = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ _a : Optional[Any] = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": __lowerCAmelCase = """cuda""" else: __lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" __lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: __lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__SCREAMING_SNAKE_CASE ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" __lowerCAmelCase = model.config.max_length - 1 else: __lowerCAmelCase = model.config.max_length __lowerCAmelCase = tokenizer( __SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = encodings["""input_ids"""] __lowerCAmelCase = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." __lowerCAmelCase = [] __lowerCAmelCase = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ): __lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = encoded_texts[start_index:end_index] __lowerCAmelCase = attn_masks[start_index:end_index] if add_start_token: __lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 ) __lowerCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 ) __lowerCAmelCase = encoded_batch with torch.no_grad(): __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits __lowerCAmelCase = out_logits[..., :-1, :].contiguous() __lowerCAmelCase = labels[..., 1:].contiguous() __lowerCAmelCase = attn_mask[..., 1:].contiguous() __lowerCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
689
0
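The perplexity metric above reduces, per sequence, to exponentiating a mask-weighted mean negative log-likelihood. A toy, self-contained sketch of that core step; all shapes are illustrative.

import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(2, 5, 100)         # (batch, seq_len, vocab_size)
labels = torch.randint(0, 100, (2, 5))  # token ids
attn_mask = torch.ones(2, 5)            # 1 = real token, 0 = padding

loss_fct = CrossEntropyLoss(reduction="none")
# shift so that position t predicts token t + 1
shift_logits = logits[..., :-1, :]
shift_labels = labels[..., 1:]
shift_mask = attn_mask[..., 1:]

nll = loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_mask
perplexities = torch.exp(nll.sum(1) / shift_mask.sum(1))  # one value per sequence
print(perplexities)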
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class _UpperCAmelCase : def __init__( self : List[Any] , a : str , ): '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = 1_3 lowercase_ : Union[str, Any] = 7 lowercase_ : List[str] = True lowercase_ : Dict = True lowercase_ : List[Any] = True lowercase_ : Optional[Any] = True lowercase_ : Optional[int] = True lowercase_ : str = False lowercase_ : Tuple = False lowercase_ : Any = False lowercase_ : Union[str, Any] = 2 lowercase_ : Dict = 9_9 lowercase_ : Optional[Any] = 0 lowercase_ : Tuple = 3_2 lowercase_ : str = 2 lowercase_ : Any = 4 lowercase_ : Union[str, Any] = 0.1 lowercase_ : int = 0.1 lowercase_ : Any = 5_1_2 lowercase_ : Union[str, Any] = 1_6 lowercase_ : int = 2 lowercase_ : Dict = 0.02 lowercase_ : List[str] = 3 lowercase_ : Tuple = 4 lowercase_ : Optional[int] = "last" lowercase_ : Union[str, Any] = True lowercase_ : Optional[Any] = None lowercase_ : Union[str, Any] = 0 def lowerCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) lowercase_ : Optional[int] = None if self.use_input_lengths: lowercase_ : Optional[int] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase_ : Optional[Any] = None if self.use_token_type_ids: lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase_ : str = None lowercase_ : Optional[Any] = None lowercase_ : str = None if self.use_labels: lowercase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ : Any = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) lowercase_ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ : List[str] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase__ ( self : Dict , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Union[str, Any] , a : Any 
, a : Optional[int] , a : Any , a : Optional[Any] , a : str , ): '''simple docstring''' lowercase_ : Optional[Any] = TFFlaubertModel(config=__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowercase_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = [input_ids, input_mask] lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self : int , a : Dict , a : Any , a : List[Any] , a : Dict , a : Optional[Any] , a : str , a : List[str] , a : int , a : Tuple , ): '''simple docstring''' lowercase_ : Optional[Any] = TFFlaubertWithLMHeadModel(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self : Optional[int] , a : int , a : Tuple , a : Any , a : int , a : str , a : List[str] , a : Optional[Any] , a : List[str] , a : Dict , ): '''simple docstring''' lowercase_ : str = TFFlaubertForQuestionAnsweringSimple(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = {"input_ids": input_ids, "lengths": input_lengths} lowercase_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self : List[Any] , a : Union[str, Any] , a : Optional[Any] , a : Union[str, Any] , a : int , a : Tuple , a : Any , a : Optional[int] , a : Optional[Any] , a : Any , ): '''simple docstring''' lowercase_ : List[str] = TFFlaubertForSequenceClassification(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = {"input_ids": input_ids, "lengths": input_lengths} lowercase_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase__ ( self : Any , a : Union[str, Any] , a : Dict , a : List[str] , a : int , a : Optional[int] , a : Tuple , a : Optional[Any] , a : str , a : Optional[int] , ): '''simple docstring''' lowercase_ : Any = self.num_labels lowercase_ : int = TFFlaubertForTokenClassification(config=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase_ : int = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self : Optional[Any] , a : Optional[Any] , a : str , a : List[Any] , a : Any , a : Union[str, Any] , a : Union[str, Any] , a : str , a : Tuple , a : Optional[int] , ): '''simple docstring''' lowercase_ : List[Any] = self.num_choices lowercase_ : Tuple = TFFlaubertForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) lowercase_ : List[str] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) lowercase_ : int = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) lowercase_ : str = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": 
multiple_choice_token_type_ids, } lowercase_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ : Tuple = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : List[str] = config_and_inputs lowercase_ : List[str] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): __lowerCamelCase: List[Any] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __lowerCamelCase: Union[str, Any] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __lowerCamelCase: Optional[int] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __lowerCamelCase: Dict = False __lowerCamelCase: str = False def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : Dict , a : Dict ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ : List[str] = TFFlaubertModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , emb_dim=3_7 ) def lowerCAmelCase__ ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase__ ( self : List[str] ): '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) @slow def lowerCAmelCase__ ( self : Any ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Union[str, Any] = TFFlaubertModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_tf @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ : Tuple = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) lowercase_ : str = tf.convert_to_tensor( [[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" lowercase_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )[0] lowercase_ : List[str] = tf.TensorShape((1, 8, 5_1_2) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. lowercase_ : int = tf.convert_to_tensor( [ [ [-1.876_8773, -1.56_6555, 0.2707_2418], [-1.692_0038, -0.587_3505, 1.932_9599], [-2.956_3985, -1.699_3835, 1.797_2052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
620
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _UpperCAmelCase ( lowerCAmelCase_ ): a : Union[str, Any] =["""image_processor"""] a : Dict ="""SamImageProcessor""" def __init__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.image_processor __lowerCAmelCase = -10 __lowerCAmelCase = self.image_processor.size["""longest_edge"""] def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) # pop arguments that are not used in the foward but used nevertheless __lowerCAmelCase = encoding_image_processor["""original_sizes"""] if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor __lowerCAmelCase = original_sizes.numpy() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points( input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = self._normalize_and_convert( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",): '''simple docstring''' if input_points is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: __lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_labels is not None: __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE ) for box in input_boxes ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE ) for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default 
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"""input_boxes""": input_boxes} ) if input_points is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"""input_points""": input_points} ) if input_labels is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"""input_labels""": input_labels} ) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = max([point.shape[0] for point in input_points] ) __lowerCAmelCase = [] for i, point in enumerate(__SCREAMING_SNAKE_CASE ): if point.shape[0] != expected_nb_points: __lowerCAmelCase = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 ) __lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] ) processed_input_points.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = processed_input_points return input_points, input_labels def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase = original_size __lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE ) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1,2,2 ) __lowerCAmelCase = coords[..., 0] * (new_w / old_w) __lowerCAmelCase = coords[..., 1] * (new_h / old_h) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1,4 ) return coords def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,): '''simple docstring''' if input_points is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor __lowerCAmelCase = input_points.numpy().tolist() if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ): raise ValueError("""Input points must be a list of list of floating points.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points] else: __lowerCAmelCase = None if input_labels is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): __lowerCAmelCase = 
input_labels.numpy().tolist() if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ): raise ValueError("""Input labels must be a list of list integers.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels] else: __lowerCAmelCase = None if input_boxes is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): __lowerCAmelCase = input_boxes.numpy().tolist() if ( not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE ) ): raise ValueError("""Input boxes must be a list of list of list of floating points.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes] else: __lowerCAmelCase = None return input_points, input_labels, input_boxes @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
689
0
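The SAM processor record above rescales prompt coordinates from the original image size into the resized space whose longest edge matches the processor's target size. A rough sketch of that mapping; the rounding rule for the resized shape is an assumption here (the real one lives in the image processor's `_get_preprocess_shape`).

import numpy as np

def rescale_coords(coords: np.ndarray, original_size: tuple, longest_edge: int = 1024) -> np.ndarray:
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = coords.astype(np.float64)  # work on a copy, leave the input intact
    out[..., 0] *= new_w / old_w  # x coordinates
    out[..., 1] *= new_h / old_h  # y coordinates
    return out

# a point at (500, 375) in a 1536x2048 image lands at (250.0, 187.5)
print(rescale_coords(np.array([[500.0, 375.0]]), original_size=(1536, 2048)))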
import numpy as np


def tanh(vector: np.ndarray) -> np.ndarray:
    # Hyperbolic tangent written in its logistic form: tanh(x) = 2 / (1 + e^(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
442
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") _a : int = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) a : int =field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} ) def lowerCamelCase__ ( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __lowerCAmelCase = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __lowerCAmelCase = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str =field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _lowerCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowercase ) datasets.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __lowerCAmelCase = data_args.train_file.split(""".""" )[-1] __lowerCAmelCase = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __lowerCAmelCase = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names __lowerCAmelCase = len(lowercase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __lowerCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , ) __lowerCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __lowerCAmelCase = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. __lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1} __lowerCAmelCase = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowercase ): # Tokenize the texts def _convert_table_text_to_pandas(lowercase ): __lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __lowerCAmelCase = examples["""statement"""] __lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase ) __lowerCAmelCase = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __lowerCAmelCase = raw_datasets.map( lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __lowerCAmelCase = raw_datasets["""train"""] if data_args.max_train_samples is not None: __lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __lowerCAmelCase = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __lowerCAmelCase = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __lowerCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowercase ) ) , 3 ): logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowercase ): __lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCAmelCase = default_data_collator elif training_args.fpaa: __lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 ) else: __lowerCAmelCase = None # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase ) __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase ) ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowercase ) trainer.save_metrics("""train""" , lowercase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase ) __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.log_metrics("""eval""" , lowercase ) trainer.save_metrics("""eval""" , lowercase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __lowerCAmelCase = predict_dataset.remove_columns("""label""" ) __lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) __lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(lowercase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowercase ): __lowerCAmelCase = label_list[item] writer.write(f'{index}\t{item}\n' ) __lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**lowercase ) else: trainer.create_model_card(**lowercase ) def _lowerCAmelCase ( lowercase ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
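# NOTE: a minimal, cleaned-up sketch of the `_convert_table_text_to_pandas` step
# buried in the script above, assuming the tab_fact "table_text" format:
# "#"-delimited cells, newline-delimited rows, header in the first row. The
# readable names here stand in for the mangled ones in the sample.
import pandas as pd

def convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # Split rows on newlines and cells on "#"; the first row holds column names.
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

# e.g. convert_table_text_to_pandas("name#age\nalice#30\nbob#25")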
689
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = { """facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""", """facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class XLMRobertaXLConfig(PretrainedConfig): model_type = """xlm-roberta-xl""" def __init__(self, vocab_size=25_08_80, hidden_size=25_60, num_hidden_layers=36, num_attention_heads=32, intermediate_size=1_02_40, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_14, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ) -> None: '''simple docstring''' super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class XLMRobertaXLOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
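# NOTE: quick usage sketch for the configuration above, assuming an installed
# transformers release that ships XLMRobertaXLConfig; no weights are needed.
from transformers import XLMRobertaXLConfig

tiny_config = XLMRobertaXLConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
print(tiny_config.model_type)  # "xlm-roberta-xl"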
204
'''simple docstring''' import os import sys import unittest _a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""") class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = find_backend(""" if not is_torch_available():""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __lowerCAmelCase = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""",objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" ) __lowerCAmelCase = create_dummy_object("""function""","""'torch'""" ) self.assertEqual( __SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) __lowerCAmelCase = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ __lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ __lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
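# NOTE: a hypothetical re-implementation of the find_backend helper exercised by
# the tests above; the real one lives in the repo's utils/check_dummies.py, but
# this regex reproduces the expected outputs shown in the assertions.
import re

_re_backend = re.compile(r"is_([a-z_]*)_available\(\)")

def find_backend(line: str):
    """Return e.g. "torch_and_transformers" for an `if not (...):` backend guard line."""
    if "is_" not in line:
        return None
    backends = _re_backend.findall(line)
    return "_and_".join(backends) if backends else None

# find_backend(" if not (is_torch_available() and is_transformers_available()):")
# -> "torch_and_transformers"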
689
0
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 _snake_case : str = { # 1536-bit 5: { """prime""": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), """generator""": 2, }, # 2048-bit 14: { """prime""": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), """generator""": 2, }, # 3072-bit 15: { """prime""": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), """generator""": 2, }, # 4096-bit 16: { """prime""": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), """generator""": 2, }, # 6144-bit 17: { """prime""": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), """generator""": 2, }, # 8192-bit 18: { """prime""": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), """generator""": 2, }, } class UpperCamelCase_ : '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] = 14 ) ->Dict: if group not in primes: raise ValueError("Unsupported Group" ) lowercase = primes[group]["prime"] lowercase = primes[group]["generator"] lowercase = int(hexlify(urandom(32 ) ) , base=16 ) def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->str: return hex(self.__private_key )[2:] def SCREAMING_SNAKE_CASE( self :int ) ->Tuple: lowercase = pow(self.generator , self.__private_key , self.prime ) return hex(__SCREAMING_SNAKE_CASE )[2:] def SCREAMING_SNAKE_CASE( self :Dict , lowerCAmelCase__ :Dict ) ->Union[str, Any]: return ( 2 <= key <= self.prime - 2 and pow(__SCREAMING_SNAKE_CASE , (self.prime - 1) // 2 , self.prime ) == 1 ) def SCREAMING_SNAKE_CASE( self :str , lowerCAmelCase__ :Union[str, Any] ) ->Dict: lowercase = int(__SCREAMING_SNAKE_CASE , base=16 ) if not self.is_valid_public_key(__SCREAMING_SNAKE_CASE ): raise ValueError("Invalid public key" ) lowercase = pow(__SCREAMING_SNAKE_CASE , self.__private_key , self.prime ) return shaaaa(str(__SCREAMING_SNAKE_CASE ).encode() ).hexdigest() @staticmethod def SCREAMING_SNAKE_CASE( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) ->Any: return ( 2 <= remote_public_key_str <= prime - 2 and pow(__SCREAMING_SNAKE_CASE , (prime - 1) // 2 , __SCREAMING_SNAKE_CASE ) == 1 ) @staticmethod def SCREAMING_SNAKE_CASE( lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] = 14 ) ->int: lowercase = int(__SCREAMING_SNAKE_CASE , base=16 ) lowercase = int(__SCREAMING_SNAKE_CASE , base=16 ) lowercase = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError("Invalid public key" ) lowercase = pow(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return shaaaa(str(__SCREAMING_SNAKE_CASE ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
441
'''simple docstring''' def decimal_to_fraction(decimal) -> tuple[int, int]: """Convert a decimal (int, float, or numeric string) to a reduced fraction.""" try: decimal = float(decimal) except ValueError: raise ValueError("""Please enter a valid number""" ) fractional_part = decimal - int(decimal) if fractional_part == 0: return int(decimal), 1 else: number_of_frac_digits = len(str(decimal).split(""".""" )[1] ) numerator = int(decimal * (10**number_of_frac_digits) ) denominator = 10**number_of_frac_digits divisor, dividend = denominator, numerator while True: remainder = dividend % divisor if remainder == 0: break dividend, divisor = divisor, remainder # divisor is now gcd(numerator, denominator); reduce the fraction numerator, denominator = numerator / divisor, denominator / divisor return int(numerator), int(denominator) if __name__ == "__main__": print(f'{decimal_to_fraction(2) = }') print(f'{decimal_to_fraction(89.0) = }') print(f'{decimal_to_fraction("67") = }') print(f'{decimal_to_fraction("45.0") = }') print(f'{decimal_to_fraction(1.5) = }') print(f'{decimal_to_fraction("6.25") = }') print(f'{decimal_to_fraction("78td") = }')  # raises ValueError: not a valid number
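# NOTE: standard-library cross-check for decimal_to_fraction above.
from fractions import Fraction

assert decimal_to_fraction("6.25") == (Fraction("6.25").numerator, Fraction("6.25").denominator)  # (25, 4)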
689
0
'''simple docstring''' import numpy as np from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma): """Build a ksize x ksize real Gabor kernel (parameter names follow cv2.getGaborKernel).""" # ensure an odd kernel size so there is a true center pixel if (ksize % 2) == 0: ksize = ksize + 1 gabor = np.zeros((ksize, ksize), dtype=np.float64 ) # each value for y in range(ksize ): for x in range(ksize ): # distance from center px = x - ksize // 2 py = y - ksize // 2 # degree to radiant _theta = theta / 180 * np.pi cos_theta = np.cos(_theta ) sin_theta = np.sin(_theta ) # get kernel x _x = cos_theta * px + sin_theta * py # get kernel y _y = -sin_theta * px + cos_theta * py # fill kernel gabor[y, x] = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image img = imread("../image_data/lena.jpg") # turn image in gray scale value gray = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges out = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filter2D(gray, CV_8UC3, kernel_10) out = out / out.max() * 2_55 out = out.astype(np.uint8) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
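# NOTE: asset-free usage sketch for gabor_filter_kernel above, run on a
# synthetic ramp image so it works without ../image_data/lena.jpg; ddepth=-1
# keeps the source depth.
import numpy as np
from cv2 import filter2D

ramp = np.tile(np.arange(64, dtype=np.uint8), (64, 1))   # intensity varies along x
kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)         # theta=0: responds to variation along x
edges = filter2D(ramp, -1, kernel)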
536
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a : Dict = _symbol_database.Default() _a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) _a : str = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a : str = None _a : Union[str, Any] = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a : Optional[int] = 4_5 _a : List[Any] = 1_5_8_1 _a : str = 1_5_1_7 _a : Optional[Any] = 1_5_7_0 _a : List[str] = 1_5_8_4 _a : List[Any] = 1_7_9_3 _a : Union[str, Any] = 1_7_9_5 _a : Tuple = 1_9_1_6 _a : List[Any] = 1_8_6_4 _a : Any = 1_9_0_5 _a : Optional[Any] = 1_9_1_9 _a : Optional[int] = 2_4_2_9 _a : Tuple = 2_2_0_8 _a : Optional[Any] = 2_4_1_8 _a : List[Any] = 2_3_2_3 _a : str = 2_4_0_7 # @@protoc_insertion_point(module_scope)
689
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' return int((input_a, input_a).count(0 ) != 0 ) def _lowerCamelCase ( ): '''simple docstring''' assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
636
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( lowerCAmelCase_ ): a : torch.FloatTensor class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = torch.nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) # down __lowerCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_down_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) self.down_blocks.append(__SCREAMING_SNAKE_CASE ) # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # out __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = 2 * out_channels if double_z else out_channels __lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = x __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(""">=""","""1.11.0""" ): for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: __lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE ) # post-process __lowerCAmelCase = 
self.conv_norm_out(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) __lowerCAmelCase = in_channels if norm_type == """spatial""" else None # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # up __lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = reversed_block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_up_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,) self.up_blocks.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output_channel # out if norm_type == "spatial": __lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' __lowerCAmelCase = z __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(""">=""","""1.11.0""" ): # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for 
up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ): '''simple docstring''' super().__init__() __lowerCAmelCase = n_e __lowerCAmelCase = vq_embed_dim __lowerCAmelCase = beta __lowerCAmelCase = legacy __lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e ) __lowerCAmelCase = remap if self.remap is not None: self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) ) __lowerCAmelCase = self.used.shape[0] __lowerCAmelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCAmelCase = self.re_embed __lowerCAmelCase = self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __lowerCAmelCase = n_e __lowerCAmelCase = sane_index_shape def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long() __lowerCAmelCase = match.argmax(-1 ) __lowerCAmelCase = match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device ) else: __lowerCAmelCase = self.unknown_index return new.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token __lowerCAmelCase = 0 # simply set to zero __lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE ) return back.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = z.permute(0,2,3,1 ).contiguous() __lowerCAmelCase = z.view(-1,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 ) __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape ) __lowerCAmelCase = None __lowerCAmelCase = None # compute loss for embedding if not self.legacy: __lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCAmelCase 
= torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowerCAmelCase = z + (z_q - z).detach() # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() if self.remap is not None: __lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis __lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten if self.sane_index_shape: __lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if self.remap is not None: __lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis __lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ) if shape is not None: __lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE ) # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() return z_q class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = parameters __lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 ) __lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 ) __lowerCAmelCase = deterministic __lowerCAmelCase = torch.exp(0.5 * self.logvar ) __lowerCAmelCase = torch.exp(self.logvar ) if self.deterministic: __lowerCAmelCase = __lowerCAmelCase = torch.zeros_like( self.mean,device=self.parameters.device,dtype=self.parameters.dtype ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' __lowerCAmelCase = randn_tensor( self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype ) __lowerCAmelCase = self.mean + self.std * sample return x def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,dim=[1, 2, 3],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __lowerCAmelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' return self.mean
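# NOTE: minimal sketch of the straight-through estimator used by the vector
# quantizer above (`z + (z_q - z).detach()`): the forward pass returns the
# quantized values while gradients flow to z as if quantization were the identity.
import torch

z = torch.randn(4, 8, requires_grad=True)
z_q = torch.round(z)              # stand-in for the nearest-codebook-entry lookup
z_q = z + (z_q - z).detach()      # forward: quantized; backward: d(z_q)/dz = 1
z_q.sum().backward()
assert torch.allclose(z.grad, torch.ones_like(z))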
689
0
"""simple docstring""" def __lowerCAmelCase ( lowercase : Optional[int] ) -> str: """simple docstring""" snake_case : Optional[int] = len(lowercase ) for i in range(length - 1 ): snake_case : Tuple = i for k in range(i + 1 , lowercase ): if collection[k] < collection[least]: snake_case : Dict = k if least != i: snake_case ,snake_case : str = (collection[i], collection[least]) return collection if __name__ == "__main__": __snake_case = input("""Enter numbers separated by a comma:\n""").strip() __snake_case = [int(item) for item in user_input.split(""",""")] print(selection_sort(unsorted))
178
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features _a : Optional[int] = logging.get_logger(__name__) _a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) _a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} ) a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) a : int =field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : int =field( default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) a : int =field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) a : int =field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) a : float =field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class _UpperCAmelCase ( lowerCAmelCase_ ): a : Optional[Any] ="""train""" a : Optional[int] ="""dev""" class _UpperCAmelCase ( lowerCAmelCase_ ): a : SquadDataTrainingArguments a : List[SquadFeatures] a : Split a : bool def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",): '''simple docstring''' __lowerCAmelCase = args __lowerCAmelCase = is_language_sensitive __lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): try: __lowerCAmelCase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __lowerCAmelCase = mode # Load data features from cache or dataset file __lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1""" 
__lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowerCAmelCase = cached_features_file + """.lock""" with FileLock(__SCREAMING_SNAKE_CASE ): if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: __lowerCAmelCase = time.time() __lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __lowerCAmelCase = self.old_features["""features"""] __lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' """ future run""" ) else: if mode == Split.dev: __lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) else: __lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) __lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features( examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = self.features[i] __lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float ) __lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float ) __lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
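# NOTE: minimal sketch of the FileLock-guarded caching pattern used above, so
# that only one process in distributed training builds the feature cache while
# the others block on the lock and then read the finished file.
import os
import torch
from filelock import FileLock

def load_or_build(cache_file, build):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        data = build()
        torch.save(data, cache_file)
        return data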
689
0
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A__ = """true""" def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=82 , __lowerCAmelCase=16 ) -> List[Any]: """simple docstring""" set_seed(42 ) snake_case__ : Optional[int] = RegressionModel() snake_case__ : int = deepcopy(__lowerCAmelCase ) snake_case__ : Union[str, Any] = RegressionDataset(length=__lowerCAmelCase ) snake_case__ : List[Any] = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase ) model.to(accelerator.device ) snake_case__ , snake_case__ : Tuple = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) return model, ddp_model, dataloader def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=False ) -> Optional[int]: """simple docstring""" snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) snake_case__ : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(__lowerCAmelCase ): snake_case__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs with accelerator.main_process_first(): snake_case__ : List[str] = dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) snake_case__ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCAmelCase ): if use_longest: return tokenizer.pad(__lowerCAmelCase , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(__lowerCAmelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=16 ) def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: """simple docstring""" snake_case__ : str = Accelerator(dispatch_batches=__lowerCAmelCase , split_batches=__lowerCAmelCase ) snake_case__ : Any = get_dataloader(__lowerCAmelCase , not dispatch_batches ) snake_case__ : List[str] = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowerCAmelCase ) snake_case__ , snake_case__ : Dict = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: """simple docstring""" snake_case__ : Union[str, Any] = [] for batch in dataloader: snake_case__ , snake_case__ : int = batch.values() with torch.no_grad(): snake_case__ : Tuple = model(__lowerCAmelCase ) snake_case__ , snake_case__ : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) snake_case__ , snake_case__ : List[str] = [], [] for logit, targ in logits_and_targets: logits.append(__lowerCAmelCase ) targs.append(__lowerCAmelCase ) snake_case__ , snake_case__ : Any = torch.cat(__lowerCAmelCase ), torch.cat(__lowerCAmelCase ) return logits, targs def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=82 
, __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=16 ) -> List[str]: """simple docstring""" snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = get_basic_setup(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) snake_case__ , snake_case__ : List[Any] = generate_predictions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) assert ( len(__lowerCAmelCase ) == num_samples ), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowerCAmelCase )}""" def _lowerCAmelCase ( __lowerCAmelCase = False , __lowerCAmelCase = False ) -> int: """simple docstring""" snake_case__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' ) snake_case__ , snake_case__ : int = get_mrpc_setup(__lowerCAmelCase , __lowerCAmelCase ) # First do baseline snake_case__ , snake_case__ , snake_case__ : Any = setup['''no'''] model.to(__lowerCAmelCase ) model.eval() for batch in dataloader: batch.to(__lowerCAmelCase ) with torch.inference_mode(): snake_case__ : Optional[Any] = model(**__lowerCAmelCase ) snake_case__ : Union[str, Any] = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__lowerCAmelCase , references=batch['''labels'''] ) snake_case__ : Any = metric.compute() # Then do distributed snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): snake_case__ : Optional[Any] = model(**__lowerCAmelCase ) snake_case__ : Optional[int] = outputs.logits.argmax(dim=-1 ) snake_case__ : Optional[int] = batch['''labels'''] snake_case__ , snake_case__ : int = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__lowerCAmelCase , references=__lowerCAmelCase ) snake_case__ : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def _lowerCAmelCase ( ) -> str: """simple docstring""" snake_case__ : Union[str, Any] = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" ) test_mrpc(__lowerCAmelCase , __lowerCAmelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: snake_case__ : List[Any] = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase ) if accelerator.is_local_main_process: print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" ) test_torch_metrics(__lowerCAmelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) snake_case__ : Union[str, Any] = Accelerator() 
test_torch_metrics(snake_case__ , 512 ) snake_case__.state._reset_state() main = _lowerCAmelCase def _mp_fn ( index ) -> None: """simple docstring""" main() if __name__ == "__main__": main()
252
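The accelerate sample above is built around `Accelerator.gather_for_metrics`, which gathers per-process tensors and drops the samples that were duplicated to pad the final batch. A minimal sketch of that evaluation loop, assuming the standard `accelerate` API (the model, dataloader, and batch keys are illustrative):

import torch
from accelerate import Accelerator

def distributed_eval(model, dataloader):
    accelerator = Accelerator()
    model, dataloader = accelerator.prepare(model, dataloader)
    model.eval()
    all_preds, all_targets = [], []
    for batch in dataloader:
        with torch.no_grad():
            logits = model(batch["x"])
        # Gather across processes, trimming the duplicates that were added
        # to make the last batch divisible by the number of processes.
        preds, targets = accelerator.gather_for_metrics((logits, batch["y"]))
        all_preds.append(preds)
        all_targets.append(targets)
    return torch.cat(all_preds), torch.cat(all_targets)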
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _lowerCAmelCase ( lowercase ) -> Optional[Any]: # vision encoder if "img_encoder.pos_embed" in name: __lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: __lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: __lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: __lowerCAmelCase = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: __lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: __lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: __lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: __lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: __lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: __lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: __lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: __lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: __lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Dict: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] ) __lowerCAmelCase = config.vision_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase = int(key_split[3] ) __lowerCAmelCase = config.text_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[ dim : dim * 2, : ] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] else: __lowerCAmelCase = rename_key(lowercase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): __lowerCAmelCase = val.squeeze_() else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]: __lowerCAmelCase = GroupViTConfig() __lowerCAmelCase = GroupViTModel(lowercase ).eval() __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0) # verify result __lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) __lowerCAmelCase = prepare_img() __lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" ) with torch.no_grad(): __lowerCAmelCase = model(**lowercase ) if model_name == "groupvit-gcc-yfcc": __lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": __lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f'Model name {model_name} not supported.' 
) assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 ) processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) print("""Successfully saved processor and model to""" , lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowercase , organization="""nielsr""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gcc-yfcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) args = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
689
0
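A recurring step in the GroupViT conversion above is splitting a fused `qkv` (or `in_proj`) matrix into separate query, key, and value projections; the fused tensor simply stacks the three along the output dimension. A self-contained sketch of the slicing (shapes are illustrative):

import torch

def split_qkv(qkv_weight, qkv_bias, dim):
    # qkv_weight: (3 * dim, dim) -- query rows, then key rows, then value rows
    q_w, k_w, v_w = qkv_weight[:dim, :], qkv_weight[dim : 2 * dim, :], qkv_weight[-dim:, :]
    q_b, k_b, v_b = qkv_bias[:dim], qkv_bias[dim : 2 * dim], qkv_bias[-dim:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)

dim = 8
(q_w, q_b), _, _ = split_qkv(torch.randn(3 * dim, dim), torch.randn(3 * dim), dim)
assert q_w.shape == (dim, dim) and q_b.shape == (dim,)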
import string import numpy def __lowerCAmelCase ( A , A ): return b if a == 0 else greatest_common_divisor(b % a , A ) class __UpperCamelCase : SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) SCREAMING_SNAKE_CASE__ = numpy.vectorize(lambda lowercase : x % 36 ) SCREAMING_SNAKE_CASE__ = numpy.vectorize(lowerCAmelCase_ ) def __init__( self : Dict , lowerCAmelCase : Tuple ): '''simple docstring''' UpperCAmelCase_ = self.modulus(__SCREAMING_SNAKE_CASE ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key UpperCAmelCase_ = encrypt_key.shape[0] def __A ( self : Dict , lowerCAmelCase : Any ): '''simple docstring''' return self.key_string.index(__SCREAMING_SNAKE_CASE ) def __A ( self : int , lowerCAmelCase : Union[str, Any] ): '''simple docstring''' return self.key_string[round(__SCREAMING_SNAKE_CASE )] def __A ( self : Dict ): '''simple docstring''' UpperCAmelCase_ = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: UpperCAmelCase_ = det % len(self.key_string ) UpperCAmelCase_ = len(self.key_string ) if greatest_common_divisor(__SCREAMING_SNAKE_CASE , len(self.key_string ) ) != 1: UpperCAmelCase_ = ( F"determinant modular {req_l} of encryption key({det}) " F"is not co prime w.r.t {req_l}.\nTry another key." ) raise ValueError(__SCREAMING_SNAKE_CASE ) def __A ( self : str , lowerCAmelCase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ = [char for char in text.upper() if char in self.key_string] UpperCAmelCase_ = chars[-1] while len(__SCREAMING_SNAKE_CASE ) % self.break_key != 0: chars.append(__SCREAMING_SNAKE_CASE ) return "".join(__SCREAMING_SNAKE_CASE ) def __A ( self : List[str] , lowerCAmelCase : int ): '''simple docstring''' UpperCAmelCase_ = self.process_text(text.upper() ) UpperCAmelCase_ = "" for i in range(0 , len(__SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ): UpperCAmelCase_ = text[i : i + self.break_key] UpperCAmelCase_ = [self.replace_letters(__SCREAMING_SNAKE_CASE ) for char in batch] UpperCAmelCase_ = numpy.array([vec] ).T UpperCAmelCase_ = self.modulus(self.encrypt_key.dot(__SCREAMING_SNAKE_CASE ) ).T.tolist()[ 0 ] UpperCAmelCase_ = "".join( self.replace_digits(__SCREAMING_SNAKE_CASE ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def __A ( self : int ): '''simple docstring''' UpperCAmelCase_ = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: UpperCAmelCase_ = det % len(self.key_string ) UpperCAmelCase_ = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: UpperCAmelCase_ = i break UpperCAmelCase_ = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__SCREAMING_SNAKE_CASE ) ) def __A ( self : Union[str, Any] , lowerCAmelCase : List[Any] ): '''simple docstring''' UpperCAmelCase_ = self.make_decrypt_key() UpperCAmelCase_ = self.process_text(text.upper() ) UpperCAmelCase_ = "" for i in range(0 , len(__SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ): UpperCAmelCase_ = text[i : i + self.break_key] UpperCAmelCase_ = [self.replace_letters(__SCREAMING_SNAKE_CASE ) for char in batch] UpperCAmelCase_ = numpy.array([vec] ).T UpperCAmelCase_ = self.modulus(decrypt_key.dot(__SCREAMING_SNAKE_CASE ) ).T.tolist()[0] UpperCAmelCase_ = "".join( self.replace_digits(__SCREAMING_SNAKE_CASE ) for num in batch_decrypted ) 
decrypted += decrypted_batch return decrypted def main(): n = int(input("Enter the order of the encryption key: " ) ) hill_matrix = [] print("Enter each row of the encryption key with space separated integers" ) for _ in range(n ): row = [int(x ) for x in input().split()] hill_matrix.append(row ) hc = HillCipher(numpy.array(hill_matrix ) ) print("Would you like to encrypt or decrypt some text? (1 or 2)" ) option = input("\n1. Encrypt\n2. Decrypt\n" ) if option == "1": text_to_encrypt = input("What text would you like to encrypt?: " ) print("Your encrypted text is:" ) print(hc.encrypt(text_to_encrypt ) ) elif option == "2": text_to_decrypt = input("What text would you like to decrypt?: " ) print("Your decrypted text is:" ) print(hc.decrypt(text_to_decrypt ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
162
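The Hill cipher above encrypts a block p of symbol indices as c = K·p (mod 36), where K is the key matrix and 36 is the size of the A-Z plus 0-9 alphabet; decryption multiplies by the modular inverse of K, which is why the key's determinant must be coprime with 36. A worked example with an illustrative 2x2 key (det = 7, coprime with 36):

import numpy

alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
key = numpy.array([[2, 5], [1, 6]])  # illustrative key, det = 7

block = numpy.array([alphabet.index(ch) for ch in "HI"])  # [7, 8]
cipher = key.dot(block) % 36                              # [54, 55] % 36 -> [18, 19]
print("".join(alphabet[n] for n in cipher))               # "ST"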
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) _a : Optional[int] = ["""model.decoder.embed_positions.weights"""] def _lowerCAmelCase ( lowercase ) -> Optional[Any]: if "emb" in name: __lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: __lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: __lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: __lowerCAmelCase = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: __lowerCAmelCase = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: __lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: __lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: __lowerCAmelCase = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]: __lowerCAmelCase = list(state_dict.keys() ) __lowerCAmelCase = {} for key in keys: __lowerCAmelCase = state_dict.pop(lowercase ) __lowerCAmelCase = rename_keys(lowercase ) if "in_proj_weight" in key: # split fused qkv proj __lowerCAmelCase = val[:hidden_size, :] __lowerCAmelCase = val[hidden_size : 2 * hidden_size, :] __lowerCAmelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCAmelCase = val else: __lowerCAmelCase = val return state_dict, enc_dec_proj_state_dict def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values __lowerCAmelCase = 1024 __lowerCAmelCase = 24 __lowerCAmelCase = 16 elif checkpoint == "medium": __lowerCAmelCase = 1536 __lowerCAmelCase = 48 __lowerCAmelCase = 24 elif checkpoint == "large": __lowerCAmelCase = 2048 __lowerCAmelCase = 48 __lowerCAmelCase = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) __lowerCAmelCase = MusicgenDecoderConfig( hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , ) return config @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]: __lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase ) __lowerCAmelCase = decoder_config_from_checkpoint(lowercase ) __lowerCAmelCase = fairseq_model.lm.state_dict() __lowerCAmelCase , __lowerCAmelCase = rename_state_dict( lowercase , hidden_size=decoder_config.hidden_size ) __lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" ) __lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) __lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase ) if len(lowercase ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model __lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase ) # check we can do a forward pass __lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCAmelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor __lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" ) __lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) __lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase ) # set the appropriate bos/pad token ids __lowerCAmelCase = 2048 __lowerCAmelCase = 2048 # set other default generation config params __lowerCAmelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCAmelCase = True __lowerCAmelCase = 3.0 if pytorch_dump_folder is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase ) processor.push_to_hub(lowercase ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) args = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
689
0
"""Main-process-only wrapper around `tqdm` for multi-process runs."""
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Render the bar only on the local main process; every other rank gets a disabled bar.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
111
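Because everything after the flag is forwarded straight to `tqdm`, the wrapper above behaves as a drop-in progress bar that only renders on the local main process. Hypothetical usage under `accelerate launch` (assuming the wrapper is re-exported as `accelerate.utils.tqdm`):

from accelerate.utils import tqdm  # assumption: the wrapper above is exported here

# Rank 0 draws the bar; every other rank iterates with a disabled bar.
for step in tqdm(True, range(1_000), desc="training"):
    pass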
"""Tarjan's algorithm for finding the strongly connected components of a directed graph."""
from collections import deque


def tarjan(g):
    """Return the strongly connected components of `g` (an adjacency list)."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = min(lowlink_of[w], lowlink_of[v])
            elif on_stack[w]:
                lowlink_of[v] = min(lowlink_of[w], lowlink_of[v])

        if lowlink_of[v] == index_of[v]:  # v is the root of a component
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
689
0
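Tarjan's algorithm visits every vertex and edge once, so it finds all strongly connected components in O(V + E); a vertex closes a component exactly when its lowlink equals its own index. A quick check using the functions above on two independent 2-cycles:

g = create_graph(4, [(0, 1), (1, 0), (2, 3), (3, 2)])
print(tarjan(g))  # [[1, 0], [3, 2]] -- each 2-cycle forms one component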
"""Dummy ONNX objects: raise a helpful ImportError when the `onnx` backend is missing."""
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
685
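These placeholder classes let the package import cleanly even when the `onnx` extra is missing: instantiation or any classmethod call raises an informative ImportError instead of a confusing NameError. A simplified sketch of the underlying pattern (the real `DummyObject` and `requires_backends` live in the library's utils and are more general):

def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires the {backends} backend(s); install them to use this class.")


class DummyObject(type):
    """Metaclass: attribute access on the class itself raises the backend error."""

    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)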
"""Entry point for the `diffusers-cli` command."""
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
689
0
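The CLI above follows the standard argparse subcommand pattern: each command class registers a subparser and sets `func` as a default, so the dispatcher only has to check `hasattr(args, "func")` and call it. A minimal standalone version of the same pattern (command and class names are illustrative):

from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello", help="print a greeting")
        sub.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the demo CLI")


parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")
HelloCommand.register_subcommand(subparsers)

args = parser.parse_args(["hello"])
service = args.func(args)
service.run()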
'''simple docstring''' import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : str = os.path.abspath(_UpperCamelCase ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model lowercase_ : Optional[int] = tf.train.list_variables(_UpperCamelCase ) lowercase_ : str = [] lowercase_ : List[Any] = [] lowercase_ : List[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") lowercase_ : Union[str, Any] = full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' lowercase_ : Tuple = name[1:] # figure out how many levels deep the name is lowercase_ : int = 0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(_UpperCamelCase ) # read data lowercase_ : Any = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase ) names.append("/".join(_UpperCamelCase ) ) arrays.append(_UpperCamelCase ) logger.info(F"""Read a total of {len(_UpperCamelCase ):,} layers""" ) # Sanity check if len(set(_UpperCamelCase ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(_UpperCamelCase ) )})""" ) lowercase_ : Tuple = list(set(_UpperCamelCase ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." 
) for full_name, array in zip(_UpperCamelCase , _UpperCamelCase ): lowercase_ : str = full_name.split("/" ) lowercase_ : int = model lowercase_ : int = [] for i, m_name in enumerate(_UpperCamelCase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): lowercase_ : List[str] = int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) lowercase_ : int = getattr(_UpperCamelCase , "embeddings" ) lowercase_ : Optional[Any] = getattr(_UpperCamelCase , "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) lowercase_ : Tuple = getattr(_UpperCamelCase , "encoder" ) lowercase_ : Union[str, Any] = getattr(_UpperCamelCase , "layer" ) lowercase_ : Union[str, Any] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) lowercase_ : Dict = getattr(_UpperCamelCase , "pooler" ) lowercase_ : List[str] = getattr(_UpperCamelCase , "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) lowercase_ : Any = getattr(_UpperCamelCase , "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) lowercase_ : Union[str, Any] = getattr(_UpperCamelCase , "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) lowercase_ : str = getattr(_UpperCamelCase , "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) lowercase_ : Union[str, Any] = getattr(_UpperCamelCase , "token_type_embeddings" ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append("weight" ) lowercase_ : int = getattr(_UpperCamelCase , "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) lowercase_ : List[str] = getattr(_UpperCamelCase , "attention" ) lowercase_ : Tuple = getattr(_UpperCamelCase , "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) lowercase_ : List[Any] = getattr(_UpperCamelCase , "attention" ) lowercase_ : List[str] = getattr(_UpperCamelCase , "output" ) lowercase_ : Union[str, Any] = getattr(_UpperCamelCase , "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) lowercase_ : int = getattr(_UpperCamelCase , "attention" ) lowercase_ : List[str] = getattr(_UpperCamelCase , "output" ) lowercase_ : Tuple = getattr(_UpperCamelCase , "dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) lowercase_ : Dict = getattr(_UpperCamelCase , "output" ) lowercase_ : Tuple = getattr(_UpperCamelCase , "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) lowercase_ : Any = getattr(_UpperCamelCase , "output" ) lowercase_ : Any = getattr(_UpperCamelCase , "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) lowercase_ : str = getattr(_UpperCamelCase , "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) lowercase_ : str = getattr(_UpperCamelCase , "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) lowercase_ : List[Any] = getattr(_UpperCamelCase , 
"value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) lowercase_ : Dict = getattr(_UpperCamelCase , "intermediate" ) lowercase_ : List[Any] = getattr(_UpperCamelCase , "dense" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("output" ) lowercase_ : str = getattr(_UpperCamelCase , "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) lowercase_ : Union[str, Any] = getattr(_UpperCamelCase , "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) lowercase_ : Tuple = getattr(_UpperCamelCase , "weight" ) else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary lowercase_ : Optional[int] = ".".join(_UpperCamelCase ) if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , _UpperCamelCase ) or re.match( R"(\S+)\.attention\.output\.dense\.weight" , _UpperCamelCase ): lowercase_ : List[Any] = array.reshape(pointer.data.shape ) if "kernel" in full_name: lowercase_ : Dict = array.transpose() if pointer.shape == array.shape: lowercase_ : Any = torch.from_numpy(_UpperCamelCase ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" logger.info(F"""Loading model based on config from {config_path}...""" ) lowercase_ : Union[str, Any] = BertConfig.from_json_file(_UpperCamelCase ) lowercase_ : List[Any] = BertModel(_UpperCamelCase ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , _UpperCamelCase ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) UpperCamelCase__ = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
620
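Stripped of the BERT-specific routing, the TF2-to-PyTorch converter above reduces to one idea: turn each TensorFlow variable name into a trace of attribute names, walk the PyTorch module tree with `getattr`, shape-check, and assign. A compact sketch of that core step (the trace format is illustrative):

import torch

def assign_from_trace(model, trace, array):
    # trace example: ["encoder", "layer", "0", "attention", "self", "query", "weight"]
    pointer = model
    for attr in trace:
        pointer = pointer[int(attr)] if attr.isdigit() else getattr(pointer, attr)
    if pointer.shape != array.shape:
        raise ValueError(f"Shape mismatch: model expects {tuple(pointer.shape)}, got {array.shape}")
    pointer.data = torch.from_numpy(array)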
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _a : List[Any] = logging.get_logger(__name__) _a : int = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } _a : Any = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: for attribute in key.split(""".""" ): __lowerCAmelCase = getattr(lowercase , lowercase ) if weight_type is not None: __lowerCAmelCase = getattr(lowercase , lowercase ).shape else: __lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __lowerCAmelCase = value elif weight_type == "weight_g": __lowerCAmelCase = value elif weight_type == "weight_v": __lowerCAmelCase = value elif weight_type == "bias": __lowerCAmelCase = value else: __lowerCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: __lowerCAmelCase = [] __lowerCAmelCase = fairseq_model.state_dict() __lowerCAmelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue __lowerCAmelCase = True if "*" in mapped_key: __lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2] __lowerCAmelCase = mapped_key.replace("""*""" , lowercase ) if "weight_g" in name: __lowerCAmelCase = """weight_g""" elif "weight_v" in name: __lowerCAmelCase = """weight_v""" elif "bias" in name: __lowerCAmelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase = """weight""" else: __lowerCAmelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = full_name.split("""conv_layers.""" )[-1] __lowerCAmelCase = name.split(""".""" ) __lowerCAmelCase = int(items[0] ) __lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowercase ) @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict: if config_path is not None: __lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase ) else: __lowerCAmelCase = UniSpeechSatConfig() __lowerCAmelCase = """""" if is_finetuned: __lowerCAmelCase = UniSpeechSatForCTC(lowercase ) else: __lowerCAmelCase = UniSpeechSatForPreTraining(lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) __lowerCAmelCase = model[0].eval() recursively_load_weights(lowercase , lowercase ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _a : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _a : Union[str, Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
689
0
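The special-cased `weight_g`/`weight_v` names in the UniSpeechSAT converter above exist because fairseq wraps the positional convolution in weight normalization, which re-parameterizes `weight` as a magnitude `weight_g` times a direction `weight_v`. A quick demonstration of where those parameter names come from:

import torch
from torch.nn.utils import weight_norm

conv = weight_norm(torch.nn.Conv1d(4, 4, kernel_size=3), name="weight", dim=2)
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v'] -- the fused `weight` is recomputed on each forward pass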
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging lowerCAmelCase_ : Tuple = """\ """ lowerCAmelCase_ : Tuple = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ lowerCAmelCase_ : Optional[Any] = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def lowercase_ ( self : Union[str, Any] , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Tuple = 16 , lowercase__ : Union[str, Any] = True , lowercase__ : List[str]=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": a_ : Dict = """cuda""" else: a_ : List[str] = """cuda""" if torch.cuda.is_available() else """cpu""" a_ : List[Any] = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE ) a_ : Optional[Any] = model.to(__SCREAMING_SNAKE_CASE ) a_ : int = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: a_ : Tuple = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__SCREAMING_SNAKE_CASE ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" a_ : List[str] = model.config.max_length - 1 else: a_ : Any = model.config.max_length a_ : Union[str, Any] = tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_attention_mask=__SCREAMING_SNAKE_CASE , ).to(__SCREAMING_SNAKE_CASE ) a_ : Tuple = encodings["""input_ids"""] a_ : int = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." a_ : List[str] = [] a_ : List[Any] = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ): a_ : Union[str, Any] = min(start_index + batch_size , len(__SCREAMING_SNAKE_CASE ) ) a_ : str = encoded_texts[start_index:end_index] a_ : Optional[Any] = attn_masks[start_index:end_index] if add_start_token: a_ : Optional[int] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE ) a_ : int = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) a_ : Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 ) a_ : Optional[Any] = encoded_batch with torch.no_grad(): a_ : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).logits a_ : Union[str, Any] = out_logits[..., :-1, :].contiguous() a_ : Union[str, Any] = labels[..., 1:].contiguous() a_ : Any = attn_mask[..., 1:].contiguous() a_ : List[Any] = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
442
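The metric above implements the textbook definition: perplexity is the exponential of the average per-token negative log-likelihood, PPL = exp(-(1/N) Σ_i log p(x_i | x_<i)), with the shifted attention mask keeping padded positions out of both the sum and the token count. The core reduction in isolation (shapes are illustrative):

import torch
from torch.nn import CrossEntropyLoss

def batch_perplexity(logits, labels, attn_mask):
    # logits: (batch, seq, vocab); labels, attn_mask: (batch, seq)
    shift_logits = logits[..., :-1, :]
    shift_labels = labels[..., 1:]
    shift_mask = attn_mask[..., 1:].float()
    nll = CrossEntropyLoss(reduction="none")(shift_logits.transpose(1, 2), shift_labels)
    return torch.exp((nll * shift_mask).sum(1) / shift_mask.sum(1))  # one PPL per sequence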
'''simple docstring''' from scipy.stats import spearmanr import datasets _a : str = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ _a : Dict = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ _a : List[str] = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
689
0
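The metric is a thin wrapper around `scipy.stats.spearmanr`, which ranks both sequences and then computes the Pearson correlation of the ranks; reproducing the first docstring example directly:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))  # -0.7, matching the example above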
def and_gate(input_a: int, input_b: int) -> int:
    """Logical AND: return 1 only when both inputs are 1."""
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
204
"""Dummy ONNX objects: raise a helpful ImportError when the `onnx` backend is missing."""
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
689
0
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _snake_case : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def __snake_case ( __magic_name__ ): '''simple docstring''' warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , __magic_name__ , ) if isinstance(__magic_name__ , torch.Tensor ): return image elif isinstance(__magic_name__ , PIL.Image.Image ): lowercase = [image] if isinstance(image[0] , PIL.Image.Image ): lowercase , lowercase = image[0].size lowercase , lowercase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowercase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] lowercase = np.concatenate(__magic_name__ , axis=0 ) lowercase = np.array(__magic_name__ ).astype(np.floataa ) / 255.0 lowercase = image.transpose(0 , 3 , 1 , 2 ) lowercase = 2.0 * image - 1.0 lowercase = torch.from_numpy(__magic_name__ ) elif isinstance(image[0] , torch.Tensor ): lowercase = torch.cat(__magic_name__ , dim=0 ) return image def __snake_case ( __magic_name__ ): '''simple docstring''' if isinstance(__magic_name__ , torch.Tensor ): return mask elif isinstance(__magic_name__ , PIL.Image.Image ): lowercase = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowercase , lowercase = mask[0].size lowercase , lowercase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowercase = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] lowercase = np.concatenate(__magic_name__ , axis=0 ) lowercase = mask.astype(np.floataa ) / 255.0 lowercase = 0 lowercase = 1 lowercase = torch.from_numpy(__magic_name__ ) elif isinstance(mask[0] , torch.Tensor ): lowercase = torch.cat(__magic_name__ , dim=0 ) return mask class UpperCamelCase_ ( lowerCAmelCase_ ): '''simple docstring''' UpperCamelCase : UNetaDModel UpperCamelCase : RePaintScheduler def __init__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) ->List[str]: super().__init__() self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] = 250 , lowerCAmelCase__ :int = 0.0 , lowerCAmelCase__ :List[Any] = 10 , lowerCAmelCase__ :int = 10 , lowerCAmelCase__ :Union[str, Any] = None , lowerCAmelCase__ :int = "pil" , lowerCAmelCase__ :Dict = True , ) ->Union[str, Any]: lowercase = image lowercase = _preprocess_image(__SCREAMING_SNAKE_CASE ) lowercase = original_image.to(device=self.device , dtype=self.unet.dtype ) lowercase = _preprocess_mask(__SCREAMING_SNAKE_CASE ) lowercase = mask_image.to(device=self.device , dtype=self.unet.dtype ) lowercase = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch''' F''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) lowercase = original_image.shape lowercase = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device ) lowercase = eta lowercase = self.scheduler.timesteps[0] + 1 lowercase = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowercase = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample # compute previous image: x_t -> x_t-1 lowercase = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowercase = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase = t lowercase = (image / 2 + 0.5).clamp(0 , 1 ) lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase = self.numpy_to_pil(__SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
441
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _a : int = logging.get_logger(__name__) _a : Optional[int] = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _UpperCAmelCase ( lowerCAmelCase_ ): a : List[str] ="""gptj""" a : Optional[int] ={ """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_embd __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = rotary_dim __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = use_cache __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__( bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
__lowerCAmelCase = 0 @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" ) __lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowerCAmelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_layer @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_head def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,): '''simple docstring''' __lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appear in the forward() __lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowerCAmelCase = seqlen + 2 __lowerCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCAmelCase = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] __lowerCAmelCase = common_inputs["""attention_mask"""] if self.use_past: __lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype __lowerCAmelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 ) return ordered_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return 13
689
0
'''simple docstring''' from __future__ import annotations def __UpperCamelCase ( lowercase_ : int ): """simple docstring""" create_state_space_tree(lowercase_ , [] , 0 , [0 for i in range(len(lowercase_ ) )] ) def __UpperCamelCase ( lowercase_ : str , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[str] , ): """simple docstring""" if index == len(lowercase_ ): print(lowercase_ ) return for i in range(len(lowercase_ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) a_ = True create_state_space_tree(lowercase_ , lowercase_ , index + 1 , lowercase_ ) current_sequence.pop() a_ = False __lowerCAmelCase = [3, 1, 2, 4] generate_all_permutations(sequence) __lowerCAmelCase = ["A", "B", "C"] generate_all_permutations(sequence_a)
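# Editor's note: the backtracking sample above only prints each permutation, so this
# hedged sanity check re-implements the same state-space-tree walk with collection and
# compares it against itertools.permutations; all names here are illustrative.
from itertools import permutations

def collect_permutations(sequence: list) -> list:
    results = []

    def backtrack(current: list, used: list) -> None:
        if len(current) == len(sequence):
            results.append(list(current))
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True            # choose element i
                current.append(item)
                backtrack(current, used)
                current.pop()             # undo the choice (backtrack)
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results

assert sorted(collect_permutations([3, 1, 2])) == sorted(map(list, permutations([3, 1, 2])))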
536
'''simple docstring''' def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int: __lowerCAmelCase = set() __lowerCAmelCase = int((limit - 24) ** (1 / 2) ) __lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) ) for primea in primes: __lowerCAmelCase = primea * primea for primea in primes: __lowerCAmelCase = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: __lowerCAmelCase = primea * primea * primea * primea __lowerCAmelCase = square + cube + tetr if total >= limit: break ret.add(lowercase ) return len(lowercase ) if __name__ == "__main__": print(f'{solution() = }')
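# Editor's note: the sample above counts numbers below the limit expressible as a prime
# square plus a prime cube plus a prime fourth power (Project Euler 87). A brute-force
# cross-check on a small limit, self-contained and independent of the mangled names:
from itertools import product

small_primes = [2, 3, 5, 7]
expressible = sorted(
    {p**2 + q**3 + r**4 for p, q, r in product(small_primes, repeat=3) if p**2 + q**3 + r**4 < 50}
)
assert expressible == [28, 33, 47, 49]  # e.g. 28 = 2**2 + 2**3 + 2**4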
689
0
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A : Union[str, Any] = { """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""], """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""], """processing_mctct""": ["""MCTCTProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MCTCTForCTC""", """MCTCTModel""", """MCTCTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
636
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : Optional[int] =TextToVideoSDPipeline a : Optional[int] =TEXT_TO_IMAGE_PARAMS a : Any =TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. a : Union[str, Any] =frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,) __lowerCAmelCase = DDIMScheduler( beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,) __lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = """np""" __lowerCAmelCase = 
sd_pipe(**__SCREAMING_SNAKE_CASE ).frames __lowerCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",) def lowerCamelCase__ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass def lowerCamelCase__ ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
689
0
"""simple docstring""" import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]), ({"num_shards": 10, "max_num_jobs": 10}, [range(lowercase , i + 1 ) for i in range(10 )]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def __lowerCAmelCase ( lowercase : int , lowercase : Dict ) -> Optional[int]: """simple docstring""" snake_case : Tuple = _distribute_shards(**lowercase ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def __lowerCAmelCase ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : Any ) -> Dict: """simple docstring""" snake_case : List[Any] = _split_gen_kwargs(lowercase , lowercase ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def __lowerCAmelCase ( lowercase : Any , lowercase : str ) -> int: """simple docstring""" if expected is RuntimeError: with pytest.raises(lowercase ): _number_of_shards_in_gen_kwargs(lowercase ) else: snake_case : Union[str, Any] = _number_of_shards_in_gen_kwargs(lowercase ) assert out == expected
178
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _lowerCAmelCase ( lowercase ) -> Optional[int]: if not is_accelerate_available(): return method __lowerCAmelCase = version.parse(accelerate.__version__ ).base_version if version.parse(lowercase ) < version.parse("""0.17.0""" ): return method def wrapper(self , *lowercase , **lowercase ): if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ): self._hf_hook.pre_forward(self ) return method(self , *lowercase , **lowercase ) return wrapper
689
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) A__ = { """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""MobileViTFeatureExtractor"""] A__ = ["""MobileViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileViTForImageClassification""", """MobileViTForSemanticSegmentation""", """MobileViTModel""", """MobileViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileViTForImageClassification""", """TFMobileViTForSemanticSegmentation""", """TFMobileViTModel""", """TFMobileViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
252
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: # load base model __lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __lowerCAmelCase = load_file(lowercase ) __lowerCAmelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) __lowerCAmelCase = pipeline.text_encoder else: __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) __lowerCAmelCase = pipeline.unet # find the target layer __lowerCAmelCase = layer_infos.pop(0 ) while len(lowercase ) > -1: try: __lowerCAmelCase = curr_layer.__getattr__(lowercase ) if len(lowercase ) > 0: __lowerCAmelCase = layer_infos.pop(0 ) elif len(lowercase ) == 0: break except Exception: if len(lowercase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __lowerCAmelCase = layer_infos.pop(0 ) __lowerCAmelCase = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(lowercase ) else: pair_keys.append(lowercase ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 ) else: __lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa ) __lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ) # update visited list for item in pair_keys: visited.append(lowercase ) return pipeline if __name__ == "__main__": _a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") _a : Optional[int] = parser.parse_args() _a : Dict = args.base_model_path _a : Optional[Any] = args.checkpoint_path _a : Union[str, Any] = args.dump_path _a : Optional[int] = args.lora_prefix_unet _a : int = args.lora_prefix_text_encoder _a : str = args.alpha _a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _a : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
689
0
from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. _a: List[str] = 10 def __lowerCAmelCase ( A , A , A , A ): for i in range(A , A ): if array[i] == target: return i return -1 def __lowerCAmelCase ( A , A ): UpperCAmelCase_ = 0 UpperCAmelCase_ = len(A ) while left <= right: if right - left < precision: return lin_search(A , A , A , A ) UpperCAmelCase_ = (left + right) // 3 + 1 UpperCAmelCase_ = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: UpperCAmelCase_ = one_third - 1 elif array[two_third] < target: UpperCAmelCase_ = two_third + 1 else: UpperCAmelCase_ = one_third + 1 UpperCAmelCase_ = two_third - 1 else: return -1 def __lowerCAmelCase ( A , A , A , A ): if left < right: if right - left < precision: return lin_search(A , A , A , A ) UpperCAmelCase_ = (left + right) // 3 + 1 UpperCAmelCase_ = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(A , one_third - 1 , A , A ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , A , A , A ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , A , A ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() _a: Union[str, Any] = input("""Enter numbers separated by comma:\n""").strip() _a: Union[str, Any] = [int(item.strip()) for item in user_input.split(""",""")] assert collection == sorted(collection), F"List must be ordered.\n{collection}." _a: str = int(input("""Enter the number to be found in the list:\n""").strip()) _a: str = ite_ternary_search(collection, target) _a: int = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F'Iterative search: {target} found at positions: {resulta}') print(F'Recursive search: {target} found at positions: {resulta}') else: print("""Not found""")
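# Editor's note: the trisection points in the sample above, (left + right) // 3 + 1 and
# 2 * (left + right) // 3 + 1, only land inside [left, right] while left == 0; once left
# moves right they drift, and e.g. searching for 33 in list(range(0, 100, 3)) returns -1
# even though 33 is present. A corrected, self-contained sketch with inclusive bounds:
def ternary_search(array: list, target: int) -> int:
    left, right = 0, len(array) - 1
    while left <= right:
        third = (right - left) // 3
        one_third, two_third = left + third, right - third
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1          # target is in the left third
        elif array[two_third] < target:
            left = two_third + 1           # target is in the right third
        else:
            left, right = one_third + 1, two_third - 1  # middle third
    return -1

data = list(range(0, 100, 3))
assert ternary_search(data, 33) == data.index(33)
assert ternary_search(data, 50) == -1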
162
'''simple docstring''' from collections import Counter from timeit import timeit def _lowerCAmelCase ( lowercase = "" , ) -> bool: return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2 def _lowerCAmelCase ( lowercase = "" ) -> bool: if len(lowercase ) == 0: return True __lowerCAmelCase = input_str.replace(""" """ , """""" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __lowerCAmelCase = {} for character in lower_case_input_str: __lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1 __lowerCAmelCase = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _lowerCAmelCase ( lowercase = "" ) -> None: print("""\nFor string = """ , lowercase , """:""" ) print( """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) print( """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) if __name__ == "__main__": _a : int = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) _a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str) print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
689
0
'''simple docstring''' from graphs.minimum_spanning_tree_kruskal import kruskal def _lowercase (): '''simple docstring''' __A : str = 9 __A : List[str] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] __A : Any = kruskal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __A : Any = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(SCREAMING_SNAKE_CASE ) == sorted(SCREAMING_SNAKE_CASE )
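# Editor's note: for reference, a minimal union-find Kruskal consistent with the
# [node, node, weight] edge format used by the test above; a generic sketch, not the
# graphs.minimum_spanning_tree_kruskal source.
def kruskal_mst(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # edges by ascending weight
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components, so it cannot close a cycle
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst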
111
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _lowerCAmelCase ( lowercase ) -> List[Any]: __lowerCAmelCase = VideoMAEConfig() set_architecture_configs(lowercase , lowercase ) if "finetuned" not in model_name: __lowerCAmelCase = False if "finetuned" in model_name: __lowerCAmelCase = """huggingface/label-files""" if "kinetics" in model_name: __lowerCAmelCase = 400 __lowerCAmelCase = """kinetics400-id2label.json""" elif "ssv2" in model_name: __lowerCAmelCase = 174 __lowerCAmelCase = """something-something-v2-id2label.json""" else: raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" ) __lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) ) __lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} return config def _lowerCAmelCase ( lowercase , lowercase ) -> Any: if "small" in model_name: __lowerCAmelCase = 384 __lowerCAmelCase = 1536 __lowerCAmelCase = 12 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 3 __lowerCAmelCase = 192 __lowerCAmelCase = 768 elif "large" in model_name: __lowerCAmelCase = 1024 __lowerCAmelCase = 4096 __lowerCAmelCase = 24 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 512 __lowerCAmelCase = 2048 elif "huge" in model_name: __lowerCAmelCase = 1280 __lowerCAmelCase = 5120 __lowerCAmelCase = 32 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 640 __lowerCAmelCase = 2560 elif "base" not in model_name: raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" ) def _lowerCAmelCase ( lowercase ) -> List[str]: if "encoder." 
in name: __lowerCAmelCase = name.replace("""encoder.""" , """""" ) if "cls_token" in name: __lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" ) if "decoder_pos_embed" in name: __lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" ) if "decoder.blocks" in name: __lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: __lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" ) if "attn.proj" in name: __lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "bias" not in name: __lowerCAmelCase = name.replace("""attn""" , """attention.self""" ) if "attn" in name: __lowerCAmelCase = name.replace("""attn""" , """attention.attention""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: __lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: __lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: __lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" ) if "head" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""head""" , """classifier""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if key.startswith("""encoder.""" ): __lowerCAmelCase = key.replace("""encoder.""" , """""" ) if "qkv" in key: __lowerCAmelCase = key.split(""".""" ) if key.startswith("""decoder.blocks""" ): __lowerCAmelCase = config.decoder_hidden_size __lowerCAmelCase = int(key_split[2] ) __lowerCAmelCase = """decoder.decoder_layers.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = config.hidden_size __lowerCAmelCase = int(key_split[1] ) __lowerCAmelCase = """videomae.encoder.layer.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __lowerCAmelCase = np.load(lowercase ) return list(lowercase ) def _lowerCAmelCase ( 
lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = get_videomae_config(lowercase ) if "finetuned" in model_name: __lowerCAmelCase = VideoMAEForVideoClassification(lowercase ) else: __lowerCAmelCase = VideoMAEForPreTraining(lowercase ) # download original checkpoint, hosted on Google Drive __lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(lowercase , lowercase , quiet=lowercase ) __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" ) if "model" in files: __lowerCAmelCase = files["""model"""] else: __lowerCAmelCase = files["""module"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) model.load_state_dict(lowercase ) model.eval() # verify model on basic input __lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __lowerCAmelCase = prepare_video() __lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" ) if "finetuned" not in model_name: __lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) __lowerCAmelCase = torch.load(lowercase ) __lowerCAmelCase = model(**lowercase ) __lowerCAmelCase = outputs.logits __lowerCAmelCase = [ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one __lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = 
torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'Model name not supported. Should be one of {model_names}' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 ) else: print("""Logits:""" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 ) print("""Logits ok!""" ) # verify loss, if applicable if model_name == "videomae-base-short": __lowerCAmelCase = outputs.loss assert torch.allclose(lowercase , lowercase , atol=1e-4 ) print("""Loss ok!""" ) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : int = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
689
0
'''simple docstring''' import datasets a_ = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ a_ = """\ XNLI is a subset of a few thousand examples from MNLI which have been translated into 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ a_ = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple: '''simple docstring''' return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def _lowerCAmelCase ( self: int) ->Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"), }) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def _lowerCAmelCase ( self: List[Any] , a: str , a: str) ->Optional[Any]: '''simple docstring''' return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)}
685
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _a : Tuple = """\ """ _a : Tuple = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ _a : Optional[Any] = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": __lowerCAmelCase = """cuda""" else: __lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" __lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: __lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__SCREAMING_SNAKE_CASE ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" __lowerCAmelCase = model.config.max_length - 1 else: __lowerCAmelCase = model.config.max_length __lowerCAmelCase = tokenizer( __SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = encodings["""input_ids"""] __lowerCAmelCase = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." __lowerCAmelCase = [] __lowerCAmelCase = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ): __lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = encoded_texts[start_index:end_index] __lowerCAmelCase = attn_masks[start_index:end_index] if add_start_token: __lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 ) __lowerCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 ) __lowerCAmelCase = encoded_batch with torch.no_grad(): __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits __lowerCAmelCase = out_logits[..., :-1, :].contiguous() __lowerCAmelCase = labels[..., 1:].contiguous() __lowerCAmelCase = attn_mask[..., 1:].contiguous() __lowerCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
689
0
'''simple docstring''' def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 10**12 ): """simple docstring""" lowercase_ : List[str] = 1 lowercase_ : List[Any] = 0 lowercase_ : List[str] = 1 lowercase_ : Dict = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
620
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _UpperCAmelCase ( lowerCAmelCase_ ): a : Union[str, Any] =["""image_processor"""] a : Dict ="""SamImageProcessor""" def __init__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.image_processor __lowerCAmelCase = -10 __lowerCAmelCase = self.image_processor.size["""longest_edge"""] def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) # pop arguments that are not used in the foward but used nevertheless __lowerCAmelCase = encoding_image_processor["""original_sizes"""] if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor __lowerCAmelCase = original_sizes.numpy() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points( input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = self._normalize_and_convert( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",): '''simple docstring''' if input_points is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: __lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_labels is not None: __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE ) for box in input_boxes ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE ) for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default 
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"""input_boxes""": input_boxes} ) if input_points is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"""input_points""": input_points} ) if input_labels is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"""input_labels""": input_labels} ) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = max([point.shape[0] for point in input_points] ) __lowerCAmelCase = [] for i, point in enumerate(__SCREAMING_SNAKE_CASE ): if point.shape[0] != expected_nb_points: __lowerCAmelCase = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 ) __lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] ) processed_input_points.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = processed_input_points return input_points, input_labels def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase = original_size __lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE ) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1,2,2 ) __lowerCAmelCase = coords[..., 0] * (new_w / old_w) __lowerCAmelCase = coords[..., 1] * (new_h / old_h) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1,4 ) return coords def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,): '''simple docstring''' if input_points is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor __lowerCAmelCase = input_points.numpy().tolist() if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ): raise ValueError("""Input points must be a list of list of floating points.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points] else: __lowerCAmelCase = None if input_labels is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): __lowerCAmelCase = 
input_labels.numpy().tolist() if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ): raise ValueError("""Input labels must be a list of list integers.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels] else: __lowerCAmelCase = None if input_boxes is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): __lowerCAmelCase = input_boxes.numpy().tolist() if ( not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE ) ): raise ValueError("""Input boxes must be a list of list of list of floating points.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes] else: __lowerCAmelCase = None return input_points, input_labels, input_boxes @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
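# Editor's note: a hedged usage sketch for the SAM processor defined above, using the
# standard transformers checkpoint name; the image path and prompt point are placeholders.
from PIL import Image
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
image = Image.open("example.png").convert("RGB")
# one image, one prompt point (x, y) given in original-image coordinates
inputs = processor(image, input_points=[[[450.0, 600.0]]], return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 1024, 1024])
print(inputs["input_points"].shape)  # torch.Size([1, 1, 1, 2]), rescaled to the 1024 grid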
689
0
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
442
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") _a : int = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) a : int =field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} ) def lowerCamelCase__ ( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __lowerCAmelCase = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __lowerCAmelCase = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str =field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _lowerCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowercase ) datasets.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __lowerCAmelCase = data_args.train_file.split(""".""" )[-1] __lowerCAmelCase = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __lowerCAmelCase = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names __lowerCAmelCase = len(lowercase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __lowerCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , ) __lowerCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __lowerCAmelCase = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. __lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1} __lowerCAmelCase = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowercase ): # Tokenize the texts def _convert_table_text_to_pandas(lowercase ): __lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __lowerCAmelCase = examples["""statement"""] __lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase ) __lowerCAmelCase = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __lowerCAmelCase = raw_datasets.map( lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __lowerCAmelCase = raw_datasets["""train"""] if data_args.max_train_samples is not None: __lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __lowerCAmelCase = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __lowerCAmelCase = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __lowerCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowercase ) ) , 3 ): logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowercase ): __lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCAmelCase = default_data_collator elif training_args.fpaa: __lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 ) else: __lowerCAmelCase = None # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase ) __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase ) ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowercase ) trainer.save_metrics("""train""" , lowercase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase ) __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.log_metrics("""eval""" , lowercase ) trainer.save_metrics("""eval""" , lowercase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __lowerCAmelCase = predict_dataset.remove_columns("""label""" ) __lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) __lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(lowercase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowercase ): __lowerCAmelCase = label_list[item] writer.write(f'{index}\t{item}\n' ) __lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**lowercase ) else: trainer.create_model_card(**lowercase ) def _lowerCAmelCase ( lowercase ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
689
0
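The TabFact fine-tuning script above converts '#'-delimited table text into a pandas DataFrame before tokenization; here is that step in isolation, with an invented example string:

import pandas as pd


def table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # Rows are newline-separated, cells are '#'-separated, and the first row
    # is the header, mirroring _convert_table_text_to_pandas in the script above.
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])


print(table_text_to_pandas("year#city\n2008#Beijing\n2012#London"))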
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
204
'''simple docstring''' import os import sys import unittest _a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""") class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = find_backend(""" if not is_torch_available():""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __lowerCAmelCase = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""",objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" ) __lowerCAmelCase = create_dummy_object("""function""","""'torch'""" ) self.assertEqual( __SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) __lowerCAmelCase = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ __lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ __lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
689
0
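A short usage sketch for the configuration class above, assuming it is importable as `transformers.SegformerConfig` (which the definition mirrors):

from transformers import SegformerConfig

# Instantiate a SegFormer (MiT-b0 style) configuration and inspect a few fields.
config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
print(config.model_type)    # "segformer"
print(config.hidden_sizes)  # [32, 64, 160, 256]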
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _snake_case : str = logging.get_logger(__name__) _snake_case : List[str] = """▁""" _snake_case : List[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} _snake_case : List[str] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), } } _snake_case : Dict = { """facebook/mbart-large-en-ro""": 10_24, """facebook/mbart-large-cc25""": 10_24, } # fmt: off _snake_case : Optional[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class UpperCamelCase_ ( lowerCAmelCase_ ): '''simple docstring''' UpperCamelCase : List[Any] = VOCAB_FILES_NAMES UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Dict = ["""input_ids""", """attention_mask"""] UpperCamelCase : List[int] = [] UpperCamelCase : List[int] = [] def __init__( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any]="<s>" , lowerCAmelCase__ :Union[str, Any]="</s>" , lowerCAmelCase__ :Optional[int]="</s>" , lowerCAmelCase__ :Optional[Any]="<s>" , lowerCAmelCase__ :List[str]="<unk>" , lowerCAmelCase__ :List[Any]="<pad>" , lowerCAmelCase__ :str="<mask>" , lowerCAmelCase__ :int=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[int]=None , **lowerCAmelCase__ :str , ) ->Optional[Any]: lowercase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) ) lowercase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowercase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase = 1 lowercase = len(self.sp_model ) lowercase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE ) } lowercase = {v: k for k, v in self.lang_code_to_id.items()} lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowercase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) lowercase = src_lang if src_lang is not None else "en_XX" lowercase = self.lang_code_to_id[self._src_lang] lowercase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self :Optional[int] ) ->Tuple: lowercase = self.__dict__.copy() lowercase = None lowercase = self.sp_model.serialized_model_proto() return state def __setstate__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] ) ->Optional[int]: lowercase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->Optional[Any]: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def SCREAMING_SNAKE_CASE( self :List[Any] ) ->Union[str, Any]: return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE( self :Optional[int] , lowerCAmelCase__ :Union[str, Any] ) ->List[Any]: lowercase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] = None , lowerCAmelCase__ :Union[str, Any] = False ) ->Any: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase = [1] * len(self.prefix_tokens ) lowercase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones def SCREAMING_SNAKE_CASE( self :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int = None ) ->str: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE( self :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] = None ) ->List[str]: lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ 
:Any , lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Union[str, Any] ) ->int: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowercase = src_lang lowercase = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) lowercase = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE( self :int ) ->Any: lowercase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE( self :Any , lowerCAmelCase__ :List[str] ) ->str: return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE( self :Dict , lowerCAmelCase__ :List[str] ) ->Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE( self :List[str] , lowerCAmelCase__ :Union[str, Any] ) ->Optional[Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE( self :int , lowerCAmelCase__ :Dict ) ->List[str]: lowercase = "".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , " " ).strip() return out_string def SCREAMING_SNAKE_CASE( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] = None ) ->str: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , "wb" ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] = "en_XX" , lowerCAmelCase__ :str = None , lowerCAmelCase__ :List[Any] = "ro_RO" , **lowerCAmelCase__ :int , ) ->Optional[Any]: lowercase = src_lang lowercase = tgt_lang return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->List[Any]: return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE( self :List[Any] ) ->Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE( self :int , lowerCAmelCase__ :str ) ->Any: lowercase = self.lang_code_to_id[src_lang] lowercase = [] lowercase = [self.eos_token_id, self.cur_lang_code] def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :List[Any] ) ->Tuple: lowercase = self.lang_code_to_id[lang] lowercase = [] lowercase = [self.eos_token_id, self.cur_lang_code]
441
from __future__ import annotations


def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: reduce numerator/denominator by their gcd.
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError
689
0
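As a cross-check for the `decimal_to_fraction` routine above, the standard library's `fractions.Fraction` reduces the same decimal strings to lowest terms:

from fractions import Fraction

for value in ("2", "89.0", "67", "45.0", "1.5", "6.25"):
    frac = Fraction(value)  # exact for decimal strings
    print(value, "->", (frac.numerator, frac.denominator))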
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
536
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a : Dict = _symbol_database.Default() _a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) _a : str = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a : str = None _a : Union[str, Any] = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a : Optional[int] = 4_5 _a : List[Any] = 1_5_8_1 _a : str = 1_5_1_7 _a : Optional[Any] = 1_5_7_0 _a : List[str] = 1_5_8_4 _a : List[Any] = 1_7_9_3 _a : Union[str, Any] = 1_7_9_5 _a : Tuple = 1_9_1_6 _a : List[Any] = 1_8_6_4 _a : Any = 1_9_0_5 _a : Optional[Any] = 1_9_1_9 _a : Optional[int] = 2_4_2_9 _a : Tuple = 2_2_0_8 _a : Optional[Any] = 2_4_1_8 _a : List[Any] = 2_3_2_3 _a : str = 2_4_0_7 # @@protoc_insertion_point(module_scope)
689
0
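The expected per-rank size asserted in the distributed test above comes down to a one-line formula: the first `full_size % world_size` ranks receive one extra example. A standalone sketch (the helper name is illustrative):

def expected_local_size(full_size: int, rank: int, world_size: int) -> int:
    return full_size // world_size + int(rank < full_size % world_size)


# 12 examples over 5 ranks: the sizes sum back to 12.
print([expected_local_size(12, rank, 5) for rank in range(5)])  # [3, 3, 2, 2, 2]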
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] for data in source_data: for i, el in enumerate(_UpperCamelCase ): if len(_UpperCamelCase ) < i + 1: data_lists.append([] ) data_lists[i].append(float(_UpperCamelCase ) ) return data_lists def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] for dlist, weight in zip(_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase = min(_UpperCamelCase ) __lowerCAmelCase = max(_UpperCamelCase ) __lowerCAmelCase = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: __lowerCAmelCase = f"Invalid weight of {weight:f} provided" raise ValueError(_UpperCamelCase ) score_lists.append(_UpperCamelCase ) return score_lists def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(_UpperCamelCase ): __lowerCAmelCase = final_scores[j] + ele return final_scores def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = get_data(_UpperCamelCase ) __lowerCAmelCase = calculate_each_score(_UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase = generate_final_scores(_UpperCamelCase ) # append scores to source data for i, ele in enumerate(_UpperCamelCase ): source_data[i].append(_UpperCamelCase ) return source_data
636
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( lowerCAmelCase_ ): a : torch.FloatTensor class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = torch.nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) # down __lowerCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_down_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) self.down_blocks.append(__SCREAMING_SNAKE_CASE ) # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # out __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = 2 * out_channels if double_z else out_channels __lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = x __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(""">=""","""1.11.0""" ): for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: __lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE ) # post-process __lowerCAmelCase = 
self.conv_norm_out(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) __lowerCAmelCase = in_channels if norm_type == """spatial""" else None # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # up __lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = reversed_block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_up_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,) self.up_blocks.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output_channel # out if norm_type == "spatial": __lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' __lowerCAmelCase = z __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(""">=""","""1.11.0""" ): # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for 
up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ): '''simple docstring''' super().__init__() __lowerCAmelCase = n_e __lowerCAmelCase = vq_embed_dim __lowerCAmelCase = beta __lowerCAmelCase = legacy __lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e ) __lowerCAmelCase = remap if self.remap is not None: self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) ) __lowerCAmelCase = self.used.shape[0] __lowerCAmelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCAmelCase = self.re_embed __lowerCAmelCase = self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __lowerCAmelCase = n_e __lowerCAmelCase = sane_index_shape def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long() __lowerCAmelCase = match.argmax(-1 ) __lowerCAmelCase = match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device ) else: __lowerCAmelCase = self.unknown_index return new.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token __lowerCAmelCase = 0 # simply set to zero __lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE ) return back.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = z.permute(0,2,3,1 ).contiguous() __lowerCAmelCase = z.view(-1,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 ) __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape ) __lowerCAmelCase = None __lowerCAmelCase = None # compute loss for embedding if not self.legacy: __lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCAmelCase 
= torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowerCAmelCase = z + (z_q - z).detach() # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() if self.remap is not None: __lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis __lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten if self.sane_index_shape: __lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if self.remap is not None: __lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis __lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ) if shape is not None: __lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE ) # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() return z_q class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = parameters __lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 ) __lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 ) __lowerCAmelCase = deterministic __lowerCAmelCase = torch.exp(0.5 * self.logvar ) __lowerCAmelCase = torch.exp(self.logvar ) if self.deterministic: __lowerCAmelCase = __lowerCAmelCase = torch.zeros_like( self.mean,device=self.parameters.device,dtype=self.parameters.dtype ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' __lowerCAmelCase = randn_tensor( self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype ) __lowerCAmelCase = self.mean + self.std * sample return x def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,dim=[1, 2, 3],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __lowerCAmelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' return self.mean
689
0
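The quantizer in the row above pairs a nearest-neighbour codebook lookup with the straight-through trick `z + (z_q - z).detach()`. A minimal sketch of just that step, with illustrative names (not taken from the row):

import torch


def quantize(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    """Map each row of z to its nearest codebook entry, keeping gradients w.r.t. z.

    z:        (N, D) continuous latents
    codebook: (K, D) code vectors
    """
    # squared-distance-based nearest neighbour per latent: (N, K) -> (N,)
    indices = torch.cdist(z, codebook).argmin(dim=1)
    z_q = codebook[indices]  # hard quantization (non-differentiable)
    # straight-through estimator: forward pass uses z_q, backward copies grads to z
    return z + (z_q - z).detach()


if __name__ == "__main__":
    z = torch.randn(8, 4, requires_grad=True)
    codebook = torch.randn(16, 4)
    quantize(z, codebook).sum().backward()
    print(z.grad.shape)  # gradients reach z despite the argmin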
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCAmelCase ( lowerCAmelCase_ ): def __init__( self , UpperCamelCase__ , UpperCamelCase__=768 ) -> Any: '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE ) snake_case : Any = proj_size snake_case : Any = CLIPVisionModel(__SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = PaintByExampleMapper(__SCREAMING_SNAKE_CASE ) snake_case : List[Any] = nn.LayerNorm(config.hidden_size ) snake_case : Any = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling snake_case : Union[str, Any] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict: '''simple docstring''' snake_case : List[Any] = self.model(pixel_values=__SCREAMING_SNAKE_CASE ) snake_case : Any = clip_output.pooler_output snake_case : Union[str, Any] = self.mapper(latent_states[:, None] ) snake_case : List[Any] = self.final_layer_norm(__SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = self.proj_out(__SCREAMING_SNAKE_CASE ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class _lowerCAmelCase ( nn.Module ): def __init__( self , UpperCamelCase__ ) -> int: '''simple docstring''' super().__init__() snake_case : Optional[Any] = (config.num_hidden_layers + 1) // 5 snake_case : Any = config.hidden_size snake_case : Dict = 1 snake_case : Dict = nn.ModuleList( [ BasicTransformerBlock(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , activation_fn="gelu" , attention_bias=__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) ] ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' for block in self.blocks: snake_case : List[Any] = block(__SCREAMING_SNAKE_CASE ) return hidden_states
178
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features _a : Optional[int] = logging.get_logger(__name__) _a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) _a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} ) a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) a : int =field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : int =field( default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) a : int =field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) a : int =field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) a : float =field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class _UpperCAmelCase ( lowerCAmelCase_ ): a : Optional[Any] ="""train""" a : Optional[int] ="""dev""" class _UpperCAmelCase ( lowerCAmelCase_ ): a : SquadDataTrainingArguments a : List[SquadFeatures] a : Split a : bool def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",): '''simple docstring''' __lowerCAmelCase = args __lowerCAmelCase = is_language_sensitive __lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): try: __lowerCAmelCase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __lowerCAmelCase = mode # Load data features from cache or dataset file __lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1""" 
__lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowerCAmelCase = cached_features_file + """.lock""" with FileLock(__SCREAMING_SNAKE_CASE ): if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: __lowerCAmelCase = time.time() __lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __lowerCAmelCase = self.old_features["""features"""] __lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' """ future run""" ) else: if mode == Split.dev: __lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) else: __lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) __lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features( examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = self.features[i] __lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float ) __lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float ) __lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
689
0
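The SQuAD dataset cell above guards an expensive feature-conversion step with a file lock plus an on-disk cache, so only one process in distributed training builds the features. A stripped-down sketch of the same pattern; the function name and cache path are hypothetical, only the `filelock` package is taken from the row:

import os
import pickle

from filelock import FileLock


def cached_compute(cache_path: str, compute_fn, overwrite: bool = False):
    """Run compute_fn once and reuse its pickled result on later calls.

    The lock makes concurrent callers wait until the first one has
    finished writing the cache, then read it instead of recomputing.
    """
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            with open(cache_path, "rb") as f:
                return pickle.load(f)
        result = compute_fn()
        with open(cache_path, "wb") as f:
            pickle.dump(result, f)
        return result


if __name__ == "__main__":
    features = cached_compute("/tmp/features.pkl", lambda: [i * i for i in range(5)])
    print(features)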
def factorial(num: int) -> int:
    """Compute num! iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Sum the decimal digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the digit sum of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
252
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _lowerCAmelCase ( lowercase ) -> Optional[Any]: # vision encoder if "img_encoder.pos_embed" in name: __lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: __lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: __lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: __lowerCAmelCase = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: __lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: __lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: __lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: __lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: __lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: __lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: __lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: __lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: __lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Dict: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] ) __lowerCAmelCase = config.vision_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase = int(key_split[3] ) __lowerCAmelCase = config.text_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[ dim : dim * 2, : ] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] else: __lowerCAmelCase = rename_key(lowercase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): __lowerCAmelCase = val.squeeze_() else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]: __lowerCAmelCase = GroupViTConfig() __lowerCAmelCase = GroupViTModel(lowercase ).eval() __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0) # verify result __lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) __lowerCAmelCase = prepare_img() __lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" ) with torch.no_grad(): __lowerCAmelCase = model(**lowercase ) if model_name == "groupvit-gcc-yfcc": __lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": __lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f'Model name {model_name} not supported.' 
) assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 ) processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) print("""Successfully saved processor and model to""" , lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowercase , organization="""nielsr""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _a : List[str] = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
689
0
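The factorial/digit-sum code cell in this row is Project Euler problem 20; a standard-library one-liner cross-checks its default case (the digit sum of 100! is 648):

import math

# digit sum of 100! computed directly with the standard library
print(sum(int(d) for d in str(math.factorial(100))))  # 648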
import json
import os

import torch

from diffusers import UNet1DModel

os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
162
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) _a : Optional[int] = ["""model.decoder.embed_positions.weights"""] def _lowerCAmelCase ( lowercase ) -> Optional[Any]: if "emb" in name: __lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: __lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: __lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: __lowerCAmelCase = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: __lowerCAmelCase = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: __lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: __lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: __lowerCAmelCase = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]: __lowerCAmelCase = list(state_dict.keys() ) __lowerCAmelCase = {} for key in keys: __lowerCAmelCase = state_dict.pop(lowercase ) __lowerCAmelCase = rename_keys(lowercase ) if "in_proj_weight" in key: # split fused qkv proj __lowerCAmelCase = val[:hidden_size, :] __lowerCAmelCase = val[hidden_size : 2 * hidden_size, :] __lowerCAmelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCAmelCase = val else: __lowerCAmelCase = val return state_dict, enc_dec_proj_state_dict def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values __lowerCAmelCase = 1024 __lowerCAmelCase = 24 __lowerCAmelCase = 16 elif checkpoint == "medium": __lowerCAmelCase = 1536 __lowerCAmelCase = 48 __lowerCAmelCase = 24 elif checkpoint == "large": __lowerCAmelCase = 2048 __lowerCAmelCase = 48 __lowerCAmelCase = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) __lowerCAmelCase = MusicgenDecoderConfig( hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , ) return config @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]: __lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase ) __lowerCAmelCase = decoder_config_from_checkpoint(lowercase ) __lowerCAmelCase = fairseq_model.lm.state_dict() __lowerCAmelCase , __lowerCAmelCase = rename_state_dict( lowercase , hidden_size=decoder_config.hidden_size ) __lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" ) __lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) __lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase ) if len(lowercase ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model __lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase ) # check we can do a forward pass __lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCAmelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor __lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" ) __lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) __lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase ) # set the appropriate bos/pad token ids __lowerCAmelCase = 2048 __lowerCAmelCase = 2048 # set other default generation config params __lowerCAmelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCAmelCase = True __lowerCAmelCase = 3.0 if pytorch_dump_folder is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase ) processor.push_to_hub(lowercase ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) _a : List[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
689
0
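The hopper conversion cell in this row renames checkpoint weights by zipping the old and new state-dict keys in enumeration order, which silently assumes both models list parameters identically. A toy sketch of that mapping step (module shapes here are made up):

import torch
from torch import nn

# two modules with identical parameter shapes and ordering
src = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
dst = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))

src_sd = src.state_dict()
# order-based key mapping: only valid when both dicts enumerate parameters identically
mapping = dict(zip(src_sd.keys(), dst.state_dict().keys()))
renamed = {mapping[k]: v for k, v in src_sd.items()}
dst.load_state_dict(renamed)
print(torch.allclose(dst[0].weight, src[0].weight))  # True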
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
111
from collections import deque


def tarjan(g):
    """Tarjan's algorithm for the strongly connected components of a directed graph."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
689
0
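The image-negative cell above loops over pixels in Python; NumPy expresses the same transform as one vectorized subtraction. A sketch, assuming a uint8 array such as OpenCV's imread returns:

import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # 255 - value, applied elementwise to every channel at once
    return 255 - img


if __name__ == "__main__":
    img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
    assert (convert_to_negative_fast(img) == 255 - img).all()
    print("ok")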
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly ``successes`` hits in ``trials`` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
685
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
689
0
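For the case the row above prints, the numbers work out as C(4,2) = 6, 0.75^2 = 0.5625 and 0.25^2 = 0.0625, so the probability is 6 * 0.5625 * 0.0625 = 0.2109375. A standard-library cross-check:

from math import comb

# P(X = 2) for X ~ Binomial(n=4, p=0.75): C(4,2) * 0.75**2 * 0.25**2
print(comb(4, 2) * 0.75**2 * 0.25**2)  # 0.2109375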
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters UpperCamelCase__ = False UpperCamelCase__ = False def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" return TrainCommand(_UpperCamelCase ) class _UpperCAmelCase ( lowerCAmelCase_ ): @staticmethod def lowerCAmelCase__ ( a : List[str] ): '''simple docstring''' lowercase_ : str = parser.add_parser("train" , help="CLI tool to train a model on a task." ) train_parser.add_argument( "--train_data" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , ) train_parser.add_argument( "--column_label" , type=__SCREAMING_SNAKE_CASE , default=0 , help="Column of the dataset csv file with example labels." ) train_parser.add_argument( "--column_text" , type=__SCREAMING_SNAKE_CASE , default=1 , help="Column of the dataset csv file with example texts." ) train_parser.add_argument( "--column_id" , type=__SCREAMING_SNAKE_CASE , default=2 , help="Column of the dataset csv file with example ids." ) train_parser.add_argument( "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." ) train_parser.add_argument("--validation_data" , type=__SCREAMING_SNAKE_CASE , default="" , help="path to validation dataset." ) train_parser.add_argument( "--validation_split" , type=__SCREAMING_SNAKE_CASE , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , ) train_parser.add_argument("--output" , type=__SCREAMING_SNAKE_CASE , default="./" , help="path to saved the trained model." ) train_parser.add_argument( "--task" , type=__SCREAMING_SNAKE_CASE , default="text_classification" , help="Task to train the model on." ) train_parser.add_argument( "--model" , type=__SCREAMING_SNAKE_CASE , default="bert-base-uncased" , help="Model's name or path to stored model." ) train_parser.add_argument("--train_batch_size" , type=__SCREAMING_SNAKE_CASE , default=3_2 , help="Batch size for training." ) train_parser.add_argument("--valid_batch_size" , type=__SCREAMING_SNAKE_CASE , default=6_4 , help="Batch size for validation." ) train_parser.add_argument("--learning_rate" , type=__SCREAMING_SNAKE_CASE , default=3e-5 , help="Learning rate." ) train_parser.add_argument("--adam_epsilon" , type=__SCREAMING_SNAKE_CASE , default=1e-08 , help="Epsilon for Adam optimizer." 
) train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self : Optional[int] , a : Union[str, Any] ): '''simple docstring''' lowercase_ : Union[str, Any] = logging.get_logger("transformers-cli/training" ) lowercase_ : Any = "tf" if is_tf_available() else "torch" os.makedirs(args.output , exist_ok=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = args.output lowercase_ : Any = args.column_label lowercase_ : Dict = args.column_text lowercase_ : Optional[int] = args.column_id self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": lowercase_ : List[Any] = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f"""Loading dataset from {args.train_data}""" ) lowercase_ : Dict = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowercase_ : List[Any] = None if args.validation_data: self.logger.info(f"""Loading validation dataset from {args.validation_data}""" ) lowercase_ : Optional[int] = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowercase_ : Union[str, Any] = args.validation_split lowercase_ : Dict = args.train_batch_size lowercase_ : Dict = args.valid_batch_size lowercase_ : Optional[Any] = args.learning_rate lowercase_ : Dict = args.adam_epsilon def lowerCAmelCase__ ( self : Tuple ): '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def lowerCAmelCase__ ( self : Dict ): '''simple docstring''' raise NotImplementedError def lowerCAmelCase__ ( self : str ): '''simple docstring''' self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
620
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _a : List[Any] = logging.get_logger(__name__) _a : int = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } _a : Any = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: for attribute in key.split(""".""" ): __lowerCAmelCase = getattr(lowercase , lowercase ) if weight_type is not None: __lowerCAmelCase = getattr(lowercase , lowercase ).shape else: __lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __lowerCAmelCase = value elif weight_type == "weight_g": __lowerCAmelCase = value elif weight_type == "weight_v": __lowerCAmelCase = value elif weight_type == "bias": __lowerCAmelCase = value else: __lowerCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: __lowerCAmelCase = [] __lowerCAmelCase = fairseq_model.state_dict() __lowerCAmelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue __lowerCAmelCase = True if "*" in mapped_key: __lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2] __lowerCAmelCase = mapped_key.replace("""*""" , lowercase ) if "weight_g" in name: __lowerCAmelCase = """weight_g""" elif "weight_v" in name: __lowerCAmelCase = """weight_v""" elif "bias" in name: __lowerCAmelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase = """weight""" else: __lowerCAmelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = full_name.split("""conv_layers.""" )[-1] __lowerCAmelCase = name.split(""".""" ) __lowerCAmelCase = int(items[0] ) __lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowercase ) @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict: if config_path is not None: __lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase ) else: __lowerCAmelCase = UniSpeechSatConfig() __lowerCAmelCase = """""" if is_finetuned: __lowerCAmelCase = UniSpeechSatForCTC(lowercase ) else: __lowerCAmelCase = UniSpeechSatForPreTraining(lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) __lowerCAmelCase = model[0].eval() recursively_load_weights(lowercase , lowercase ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _a : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _a : Union[str, Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
689
0
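The CLI cell in this row follows the argparse subcommand pattern: each command registers a subparser, binds a factory via `set_defaults(func=...)`, and the entry point dispatches on `args.func`. A self-contained sketch of that dispatch (command and option names are made up):

from argparse import ArgumentParser, Namespace


def train_factory(args: Namespace):
    print(f"would train on {args.train_data}")


if __name__ == "__main__":
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo-cli command helpers")

    train_parser = subparsers.add_parser("train", help="CLI tool to train a model.")
    train_parser.add_argument("--train_data", type=str, required=True)
    train_parser.set_defaults(func=train_factory)

    args = parser.parse_args(["train", "--train_data", "data.csv"])
    args.func(args)  # dispatch to the registered command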
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
442
'''simple docstring''' from scipy.stats import spearmanr import datasets _a : str = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ _a : Dict = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ _a : List[str] = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
689
0
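A classic corollary of the patience-sort cell above: when each element lands on the leftmost pile whose top is >= it, the final number of piles equals the length of a longest strictly increasing subsequence. A sketch of that counting variant:

from __future__ import annotations

from bisect import bisect_left


def lis_length(seq: list[int]) -> int:
    """Length of the longest strictly increasing subsequence via patience piles."""
    tops: list[int] = []  # top card of each pile, kept sorted
    for x in seq:
        i = bisect_left(tops, x)  # leftmost pile whose top is >= x
        if i == len(tops):
            tops.append(x)  # start a new pile
        else:
            tops[i] = x  # place x on that pile
    return len(tops)


if __name__ == "__main__":
    print(lis_length([3, 1, 4, 1, 5, 9, 2, 6]))  # 4, e.g. the subsequence 1, 4, 5, 9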
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCamelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : int=8 ): snake_case : Dict = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 snake_case : int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase ( lowerCAmelCase_ ): def __init__(self : Tuple , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict , ) -> str: '''simple docstring''' super().__init__() self.register_modules( unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , movq=__SCREAMING_SNAKE_CASE , ) snake_case : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Any ) -> Optional[int]: '''simple docstring''' if latents is None: snake_case : str = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) snake_case : int = latents.to(__SCREAMING_SNAKE_CASE ) snake_case : Dict = latents * scheduler.init_noise_sigma return latents def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int]=0 ) -> Any: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) snake_case : Tuple = torch.device(f"""cuda:{gpu_id}""" ) snake_case : Optional[int] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int=0 ) -> Any: '''simple docstring''' if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) snake_case : List[Any] = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) snake_case : Any = None for cpu_offloaded_model in [self.unet, self.movq]: snake_case , snake_case : int = cpu_offload_with_hook(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE ) # We'll offload the last model manually. 
snake_case : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]: '''simple docstring''' if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__SCREAMING_SNAKE_CASE , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__SCREAMING_SNAKE_CASE ) def __call__(self : Tuple , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : str = 5_12 , snake_case__ : List[Any] = 5_12 , snake_case__ : List[str] = 1_00 , snake_case__ : List[Any] = 4.0 , snake_case__ : List[Any] = 1 , snake_case__ : List[Any] = None , snake_case__ : Any = None , snake_case__ : Any = "pil" , snake_case__ : Tuple = True , ) -> Tuple: '''simple docstring''' snake_case : Optional[Any] = self._execution_device snake_case : List[Any] = guidance_scale > 1.0 if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case : Union[str, Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case : List[Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case : str = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) snake_case : Tuple = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: snake_case : Dict = image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) snake_case : Optional[int] = negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) snake_case : List[str] = hint.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 ) snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__SCREAMING_SNAKE_CASE ) snake_case : Any = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) snake_case : Tuple = self.scheduler.timesteps snake_case : Optional[int] = self.movq.config.latent_channels snake_case , snake_case : Union[str, Any] = downscale_height_and_width(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.movq_scale_factor ) # create initial latent snake_case : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents snake_case : Optional[int] = {"image_embeds": image_embeds, "hint": hint} snake_case : List[Any] = self.unet( sample=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , added_cond_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: snake_case , snake_case : List[Any] = noise_pred.split(latents.shape[1] , dim=1 ) snake_case , snake_case : Any = noise_pred.chunk(2 ) snake_case , snake_case : str = variance_pred.chunk(2 ) snake_case : Tuple = noise_pred_uncond + 
guidance_scale * (noise_pred_text - noise_pred_uncond) snake_case : int = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): snake_case , snake_case : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 snake_case : List[str] = self.scheduler.step( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )[0] # post-processing snake_case : Tuple = self.movq.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: snake_case : Any = image * 0.5 + 0.5 snake_case : Any = image.clamp(0 , 1 ) snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": snake_case : Optional[int] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
204
'''simple docstring''' from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ): a : List[str] =["""onnx"""] def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' requires_backends(self,["""onnx"""] ) @classmethod def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' requires_backends(cls,["""onnx"""] ) @classmethod def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' requires_backends(cls,["""onnx"""] )
689
0
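For reference, the `downscale_height_and_width` helper at the top of the pipeline sample above rounds an image resolution up to the latent grid implied by the movq scale factor; the sample's mangled assignment targets obscure the intended variables. A minimal runnable sketch of that logic, with names restored (behavior taken from the sample itself):

def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple[int, int]:
    # Integer-divide by scale_factor**2, rounding up, so the latent grid covers the image.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    # Scale back up so the returned size is an exact multiple of scale_factor.
    return new_height * scale_factor, new_width * scale_factor

assert downscale_height_and_width(512, 512) == (64, 64)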
def __snake_case ( __magic_name__ , __magic_name__ ): '''simple docstring''' lowercase = 0 lowercase = len(__magic_name__ ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowercase = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__magic_name__ ): return None lowercase = sorted_collection[point] if current_item == item: return point else: if point < left: lowercase = left lowercase = point elif point > right: lowercase = right lowercase = point else: if item < current_item: lowercase = point - 1 else: lowercase = point + 1 return None def __snake_case ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowercase = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__magic_name__ ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) elif point > right: return interpolation_search_by_recursion(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( __magic_name__ , __magic_name__ , __magic_name__ , point - 1 ) else: return interpolation_search_by_recursion( __magic_name__ , __magic_name__ , point + 1 , __magic_name__ ) def __snake_case ( __magic_name__ ): '''simple docstring''' if collection != sorted(__magic_name__ ): raise ValueError("Collection must be ascending sorted" ) return True if __name__ == "__main__": import sys _snake_case : Dict = 0 if debug == 1: _snake_case : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit("Sequence must be ascending sorted to apply interpolation search") _snake_case : str = 67 _snake_case : Optional[int] = interpolation_search(collection, target) if result is not None: print(F"{target} found at positions: {result}") else: print("Not found")
441
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _a : int = logging.get_logger(__name__) _a : Optional[int] = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _UpperCAmelCase ( lowerCAmelCase_ ): a : List[str] ="""gptj""" a : Optional[int] ={ """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_embd __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = rotary_dim __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = use_cache __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__( bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
__lowerCAmelCase = 0 @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" ) __lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowerCAmelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_layer @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_head def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,): '''simple docstring''' __lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() __lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowerCAmelCase = seqlen + 2 __lowerCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCAmelCase = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] __lowerCAmelCase = common_inputs["""attention_mask"""] if self.use_past: __lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype __lowerCAmelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 ) return ordered_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return 13
689
0
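The probe index both interpolation-search variants above compute is a linear interpolation between the endpoint values. Using the same sorted collection and target as the sample's __main__ block, one probe step works out as follows (illustrative only):

collection = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 67, 0, len(collection) - 1

# Estimate where `item` would fall if values grew linearly between the endpoints.
point = left + ((item - collection[left]) * (right - left)) // (
    collection[right] - collection[left]
)
assert point == 4  # (67 - 10) * 7 // (93 - 10) == 399 // 83 == 4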
'''simple docstring''' import os import sys import unittest __lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCAmelCase = os.path.join(git_repo_path, "src", "transformers") __lowerCAmelCase = """ {0} = None """ __lowerCAmelCase = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ __lowerCAmelCase = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def _a ( self ): """simple docstring""" a_ = find_backend(' _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")' ) self.assertIsNone(__SCREAMING_SNAKE_CASE ) a_ = find_backend(' if not is_tokenizers_available():' ) self.assertEqual(__SCREAMING_SNAKE_CASE , 'tokenizers' ) a_ = find_backend(' if not is_tensorflow_text_available():' ) self.assertEqual(__SCREAMING_SNAKE_CASE , 'tensorflow_text' ) a_ = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' ) self.assertEqual(__SCREAMING_SNAKE_CASE , 'sentencepiece_and_tokenizers' ) a_ = find_backend( ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' ) self.assertEqual(__SCREAMING_SNAKE_CASE , 'sentencepiece_and_tensorflow_text' ) a_ = find_backend( ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' ) self.assertEqual(__SCREAMING_SNAKE_CASE , 'sentencepiece_and_tokenizers_and_vision' ) def _a ( self ): """simple docstring""" a_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , __SCREAMING_SNAKE_CASE ) self.assertIn('tensorflow_text' , __SCREAMING_SNAKE_CASE ) self.assertIn('sentencepiece_and_tokenizers' , __SCREAMING_SNAKE_CASE ) # Likewise, we can't assert on the exact content of a key self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertModel' , objects['tf'] ) self.assertIn('FlaxBertModel' , objects['flax'] ) self.assertIn('BertModel' , objects['torch'] ) self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] ) self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] ) def _a ( self ): """simple docstring""" a_ = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(__SCREAMING_SNAKE_CASE , '\nCONSTANT = None\n' ) a_ = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( __SCREAMING_SNAKE_CASE , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) a_ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n' a_ = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self ): """simple docstring""" a_ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n' a_ = 
create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , __SCREAMING_SNAKE_CASE )
536
'''simple docstring''' def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int: __lowerCAmelCase = set() __lowerCAmelCase = int((limit - 24) ** (1 / 2) ) __lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) ) for primea in primes: __lowerCAmelCase = primea * primea for primea in primes: __lowerCAmelCase = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: __lowerCAmelCase = primea * primea * primea * primea __lowerCAmelCase = square + cube + tetr if total >= limit: break ret.add(lowercase ) return len(lowercase ) if __name__ == "__main__": print(f'{solution() = }')
689
0
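The set-based sieve in the second file above counts numbers expressible as p**2 + q**3 + r**4 for primes p, q, r. A brute-force cross-check for a small limit, against the four values below fifty given in the Project Euler 87 statement (28, 33, 47, 49); the helper name is mine:

def count_prime_power_triples(limit: int = 50) -> int:
    # Trial-division primes are fine at this scale; the sample uses a sieve instead.
    primes = [p for p in range(2, limit) if all(p % d for d in range(2, p))]
    found = {
        p**2 + q**3 + r**4
        for p in primes
        for q in primes
        for r in primes
        if p**2 + q**3 + r**4 < limit
    }
    return len(found)

assert count_prime_power_triples(50) == 4  # 28, 33, 47, 49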
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class _UpperCamelCase : '''simple docstring''' __UpperCAmelCase : Optional[int] =BlenderbotSmallConfig __UpperCAmelCase : Optional[int] ={} __UpperCAmelCase : Optional[int] ="""gelu""" def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = eos_token_id __lowerCAmelCase = pad_token_id __lowerCAmelCase = bos_token_id def snake_case ( self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowerCAmelCase = prepare_blenderbot_small_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def snake_case ( self , __a , __a ): __lowerCAmelCase = TFBlenderbotSmallModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() __lowerCAmelCase = inputs_dict["input_ids"] __lowerCAmelCase = input_ids[:1, :] __lowerCAmelCase = inputs_dict["attention_mask"][:1, :] __lowerCAmelCase = inputs_dict["head_mask"] __lowerCAmelCase = 1 # first forward pass __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowerCAmelCase = tf.concat([input_ids, 
next_tokens] , axis=-1 ) __lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx] __lowerCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1e-3 ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ): '''simple docstring''' if attention_mask is None: __lowerCAmelCase = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __lowerCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCamelCase ( lowerCAmelCase_ ,lowerCAmelCase_ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Optional[int] =( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __UpperCAmelCase : Optional[Any] =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __UpperCAmelCase : Optional[int] =( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __UpperCAmelCase : Tuple =True __UpperCAmelCase : Union[str, Any] =False __UpperCAmelCase : List[str] =False def snake_case ( self ): __lowerCAmelCase = TFBlenderbotSmallModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def snake_case ( self ): self.config_tester.run_common_tests() def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_tokenizers @require_tf class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Any =[ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] __UpperCAmelCase : Dict ="""facebook/blenderbot_small-90M""" @cached_property def snake_case ( self ): return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def snake_case ( self ): __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self ): __lowerCAmelCase = self.tokenizer(self.src_text , return_tensors="tf" ) __lowerCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__SCREAMING_SNAKE_CASE , ) __lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__SCREAMING_SNAKE_CASE )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
636
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : Optional[int] =TextToVideoSDPipeline a : Optional[int] =TEXT_TO_IMAGE_PARAMS a : Any =TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. a : Union[str, Any] =frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,) __lowerCAmelCase = DDIMScheduler( beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,) __lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = """np""" __lowerCAmelCase = 
sd_pipe(**__SCREAMING_SNAKE_CASE ).frames __lowerCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",) def lowerCamelCase__ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass def lowerCamelCase__ ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
689
0
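A recurring piece in the test helper above is deriving an attention mask from padding: positions whose token is not pad_token_id get a 1. A framework-agnostic restatement in plain numpy (the sample builds the same mask with tf.math.not_equal; the values below are invented):

import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 7, 9, 0, 0], [3, 0, 0, 0, 0]])

attention_mask = (input_ids != pad_token_id).astype(np.int8)
assert attention_mask.tolist() == [[1, 1, 1, 0, 0], [1, 0, 0, 0, 0]]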
"""simple docstring""" def __lowerCAmelCase ( lowercase : Optional[Any] , lowercase : List[str] , lowercase : List[str] ) -> int: """simple docstring""" def count_of_possible_combinations(lowercase : Optional[int] ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(lowercase ) def __lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Tuple ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( lowercase : List[str] , lowercase : Optional[Any] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] snake_case : Optional[Any] = sum( count_of_possible_combinations_with_dp_array(target - item , lowercase ) for item in array ) snake_case : str = answer return answer snake_case : List[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(lowercase , lowercase ) def __lowerCAmelCase ( lowercase : Tuple , lowercase : str , lowercase : Any ) -> int: """simple docstring""" snake_case : Optional[Any] = [0] * (target + 1) snake_case : int = 1 for i in range(1 , target + 1 ): for j in range(lowercase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __snake_case = 3 __snake_case = 5 __snake_case = [1, 2, 5] print(combination_sum_iv(n, array, target))
178
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _lowerCAmelCase ( lowercase ) -> Optional[int]: if not is_accelerate_available(): return method __lowerCAmelCase = version.parse(accelerate.__version__ ).base_version if version.parse(lowercase ) < version.parse("""0.17.0""" ): return method def wrapper(self , *lowercase , **lowercase ): if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ): self._hf_hook.pre_forward(self ) return method(self , *lowercase , **lowercase ) return wrapper
689
0
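The bottom-up variant in the first file above fills dp[i] with the number of ordered sequences drawn from `array` that sum to i. A quick check against the file's own inputs (array [1, 2, 5], here with target 5); the clean names are mine:

def combination_sum_iv(array: list[int], target: int) -> int:
    # dp[i] = number of ordered combinations of `array` elements summing to i.
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: the empty combination
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp[target]

# 1+1+1+1+1, four orderings of 1+1+1+2, three orderings of 1+2+2, and 5 itself
assert combination_sum_iv([1, 2, 5], 5) == 9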
import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class a ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ): def __init__( self :Optional[int] ,__lowercase :Any=None ,**__lowercase :Optional[Any] ): super().__init__(features=__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = torch_tensor_kwargs import torch # noqa import torch at initialization def __lowerCamelCase ( self :List[Any] ,__lowercase :Optional[int] ): import torch if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) and column: if all( isinstance(__SCREAMING_SNAKE_CASE ,torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(__SCREAMING_SNAKE_CASE ) return column def __lowerCamelCase ( self :List[Any] ,__lowercase :Dict ): import torch if isinstance(__SCREAMING_SNAKE_CASE ,(str, bytes, type(__SCREAMING_SNAKE_CASE )) ): return value elif isinstance(__SCREAMING_SNAKE_CASE ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ): return value.tolist() snake_case__ : Any = {} if isinstance(__SCREAMING_SNAKE_CASE ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ): snake_case__ : Dict = {'''dtype''': torch.intaa} elif isinstance(__SCREAMING_SNAKE_CASE ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ): snake_case__ : Tuple = {'''dtype''': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__SCREAMING_SNAKE_CASE ,PIL.Image.Image ): snake_case__ : Optional[Any] = np.asarray(__SCREAMING_SNAKE_CASE ) return torch.tensor(__SCREAMING_SNAKE_CASE ,**{**default_dtype, **self.torch_tensor_kwargs} ) def __lowerCamelCase ( self :Tuple ,__lowercase :Any ): import torch # support for torch, tf, jax etc. 
if hasattr(__SCREAMING_SNAKE_CASE ,'''__array__''' ) and not isinstance(__SCREAMING_SNAKE_CASE ,torch.Tensor ): snake_case__ : List[str] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__SCREAMING_SNAKE_CASE ,np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for substruct in data_struct] ) elif isinstance(__SCREAMING_SNAKE_CASE ,(list, tuple) ): return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for substruct in data_struct] ) return self._tensorize(__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :Tuple ,__lowercase :str ): return map_nested(self._recursive_tensorize ,__SCREAMING_SNAKE_CASE ,map_list=__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ): snake_case__ : Dict = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE ) snake_case__ : Any = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE ) return self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :List[str] ,__lowercase :Optional[Any] ): snake_case__ : str = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE ) snake_case__ : Any = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE ,pa_table.column_names[0] ) snake_case__ : int = self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = self._consolidate(__SCREAMING_SNAKE_CASE ) return column def __lowerCamelCase ( self :List[Any] ,__lowercase :List[str] ): snake_case__ : List[str] = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for column_name in batch: snake_case__ : Tuple = self._consolidate(batch[column_name] ) return batch
252
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: # load base model __lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __lowerCAmelCase = load_file(lowercase ) __lowerCAmelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) __lowerCAmelCase = pipeline.text_encoder else: __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) __lowerCAmelCase = pipeline.unet # find the target layer __lowerCAmelCase = layer_infos.pop(0 ) while len(lowercase ) > -1: try: __lowerCAmelCase = curr_layer.__getattr__(lowercase ) if len(lowercase ) > 0: __lowerCAmelCase = layer_infos.pop(0 ) elif len(lowercase ) == 0: break except Exception: if len(lowercase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __lowerCAmelCase = layer_infos.pop(0 ) __lowerCAmelCase = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(lowercase ) else: pair_keys.append(lowercase ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 ) else: __lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa ) __lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ) # update visited list for item in pair_keys: visited.append(lowercase ) return pipeline if __name__ == "__main__": _a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") _a : Optional[int] = parser.parse_args() _a : Dict = args.base_model_path _a : Optional[Any] = args.checkpoint_path _a : Union[str, Any] = args.dump_path _a : Optional[int] = args.lora_prefix_unet _a : int = args.lora_prefix_text_encoder _a : str = args.alpha _a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _a : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
689
0
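The in-place update the LoRA merge script above applies is the usual low-rank recomposition, W <- W + alpha * (up @ down), with an extra squeeze for conv weights. A toy tensor-level sketch of the linear case; the shapes are arbitrary illustrations, not values from the script:

import torch

out_features, in_features, rank, alpha = 6, 4, 2, 0.75
base_weight = torch.zeros(out_features, in_features)
lora_up = torch.randn(out_features, rank)
lora_down = torch.randn(rank, in_features)

# Fold the rank-`rank` update into the base weight, scaled by the merge ratio.
merged = base_weight + alpha * torch.mm(lora_up, lora_down)
assert merged.shape == (out_features, in_features)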
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a: Optional[int] = { """configuration_blip_2""": [ """BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Blip2Config""", """Blip2QFormerConfig""", """Blip2VisionConfig""", ], """processing_blip_2""": ["""Blip2Processor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a: Optional[int] = [ """BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Blip2Model""", """Blip2QFormerModel""", """Blip2PreTrainedModel""", """Blip2ForConditionalGeneration""", """Blip2VisionModel""", ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys _a: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
162
'''simple docstring''' from collections import Counter from timeit import timeit def _lowerCAmelCase ( lowercase = "" , ) -> bool: return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2 def _lowerCAmelCase ( lowercase = "" ) -> bool: if len(lowercase ) == 0: return True __lowerCAmelCase = input_str.replace(""" """ , """""" ).lower() # character_freq_dict: Stores the frequency of every character in the input string __lowerCAmelCase = {} for character in lower_case_input_str: __lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1 __lowerCAmelCase = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _lowerCAmelCase ( lowercase = "" ) -> None: print("""\nFor string = """ , lowercase , """:""" ) print( """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) print( """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) if __name__ == "__main__": _a : int = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) _a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str) print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
689
0
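Both palindrome helpers in the second file above encode the same invariant: a string can be rearranged into a palindrome iff at most one character occurs an odd number of times. A compact restatement of that check (the function name is mine):

from collections import Counter

def can_rearrange_to_palindrome(text: str) -> bool:
    # At most one character may appear an odd number of times.
    counts = Counter(text.replace(" ", "").lower())
    return sum(count % 2 for count in counts.values()) < 2

assert can_rearrange_to_palindrome("Taco cat")
assert not can_rearrange_to_palindrome("python")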
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"""vocab_file""": """spm_char.model"""} _UpperCamelCase = { """vocab_file""": { """microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""", """microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""", """microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""", } } _UpperCamelCase = { """microsoft/speecht5_asr""": 1024, """microsoft/speecht5_tts""": 1024, """microsoft/speecht5_vc""": 1024, } class __magic_name__ ( lowerCAmelCase_ ): """simple docstring""" lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["""input_ids""", """attention_mask"""] def __init__( self , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase = None , **lowerCamelCase , ): '''simple docstring''' __A : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __A : int = vocab_file __A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return self.sp_model.get_piece_size() def lowerCAmelCase__ ( self ): '''simple docstring''' __A : List[Any] = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' __A : Any = self.__dict__.copy() __A : Optional[int] = None return state def __setstate__( self , lowerCamelCase ): '''simple docstring''' __A : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __A : List[str] = {} __A : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase__ ( self , lowerCamelCase ): '''simple docstring''' return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self , lowerCamelCase ): '''simple docstring''' return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self , lowerCamelCase ): '''simple docstring''' __A : Optional[int] = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) return token def lowerCAmelCase__ ( self , lowerCamelCase ): '''simple docstring''' __A : Tuple = [] __A : Dict = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __A : int = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process 
pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) __A : str = [1] if token_ids_a is None: return ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones return ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = None ): '''simple docstring''' if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return __A : List[str] = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , "wb" ) as fi: __A : Any = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
111
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _lowerCAmelCase ( lowercase ) -> List[Any]: __lowerCAmelCase = VideoMAEConfig() set_architecture_configs(lowercase , lowercase ) if "finetuned" not in model_name: __lowerCAmelCase = False if "finetuned" in model_name: __lowerCAmelCase = """huggingface/label-files""" if "kinetics" in model_name: __lowerCAmelCase = 400 __lowerCAmelCase = """kinetics400-id2label.json""" elif "ssv2" in model_name: __lowerCAmelCase = 174 __lowerCAmelCase = """something-something-v2-id2label.json""" else: raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" ) __lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) ) __lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} return config def _lowerCAmelCase ( lowercase , lowercase ) -> Any: if "small" in model_name: __lowerCAmelCase = 384 __lowerCAmelCase = 1536 __lowerCAmelCase = 12 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 3 __lowerCAmelCase = 192 __lowerCAmelCase = 768 elif "large" in model_name: __lowerCAmelCase = 1024 __lowerCAmelCase = 4096 __lowerCAmelCase = 24 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 512 __lowerCAmelCase = 2048 elif "huge" in model_name: __lowerCAmelCase = 1280 __lowerCAmelCase = 5120 __lowerCAmelCase = 32 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 640 __lowerCAmelCase = 2560 elif "base" not in model_name: raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" ) def _lowerCAmelCase ( lowercase ) -> List[str]: if "encoder." 
in name: __lowerCAmelCase = name.replace("""encoder.""" , """""" ) if "cls_token" in name: __lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" ) if "decoder_pos_embed" in name: __lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" ) if "decoder.blocks" in name: __lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: __lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" ) if "attn.proj" in name: __lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "bias" not in name: __lowerCAmelCase = name.replace("""attn""" , """attention.self""" ) if "attn" in name: __lowerCAmelCase = name.replace("""attn""" , """attention.attention""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: __lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: __lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: __lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" ) if "head" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""head""" , """classifier""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if key.startswith("""encoder.""" ): __lowerCAmelCase = key.replace("""encoder.""" , """""" ) if "qkv" in key: __lowerCAmelCase = key.split(""".""" ) if key.startswith("""decoder.blocks""" ): __lowerCAmelCase = config.decoder_hidden_size __lowerCAmelCase = int(key_split[2] ) __lowerCAmelCase = """decoder.decoder_layers.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = config.hidden_size __lowerCAmelCase = int(key_split[1] ) __lowerCAmelCase = """videomae.encoder.layer.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __lowerCAmelCase = np.load(lowercase ) return list(lowercase ) def _lowerCAmelCase ( 
lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = get_videomae_config(lowercase ) if "finetuned" in model_name: __lowerCAmelCase = VideoMAEForVideoClassification(lowercase ) else: __lowerCAmelCase = VideoMAEForPreTraining(lowercase ) # download original checkpoint, hosted on Google Drive __lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(lowercase , lowercase , quiet=lowercase ) __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" ) if "model" in files: __lowerCAmelCase = files["""model"""] else: __lowerCAmelCase = files["""module"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) model.load_state_dict(lowercase ) model.eval() # verify model on basic input __lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __lowerCAmelCase = prepare_video() __lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" ) if "finetuned" not in model_name: __lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) __lowerCAmelCase = torch.load(lowercase ) __lowerCAmelCase = model(**lowercase ) __lowerCAmelCase = outputs.logits __lowerCAmelCase = [ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one __lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = 
torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'Model name not supported. Should be one of {model_names}' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 ) else: print("""Logits:""" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 ) print("""Logits ok!""" ) # verify loss, if applicable if model_name == "videomae-base-short": __lowerCAmelCase = outputs.loss assert torch.allclose(lowercase , lowercase , atol=1e-4 ) print("""Loss ok!""" ) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : int = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
689
0
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a_ = _symbol_database.Default() a_ = _descriptor_pool.Default().AddSerializedFile( B'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) a_ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: a_ = None a_ = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a_ = 45 a_ = 1_581 a_ = 1_517 a_ = 1_570 a_ = 1_584 a_ = 1_793 a_ = 1_795 a_ = 1_916 a_ = 1_864 a_ = 1_905 a_ = 1_919 a_ = 2_429 a_ = 2_208 a_ = 2_418 a_ = 2_323 a_ = 2_407 # @@protoc_insertion_point(module_scope)
685
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_a : Tuple = """\
"""

_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_a : Optional[Any] = """
Args:
    model_id (str): model used for calculating Perplexity
            NOTE: Perplexity can only be calculated for causal language models.
                    This includes models such as gpt2, causal variations of bert,
                    causal versions of t5, and more (the full list can be found
                    in the AutoModelForCausalLM documentation here:
                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        78.22
        >>> print(round(results[\"perplexities\"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric(\"perplexity\")
        >>> input_texts = datasets.load_dataset(\"wikitext\",
        ...                                     \"wikitext-2-raw-v1\",
        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results[\"mean_perplexity\"], 2))
        60.35
        >>> print(round(results[\"perplexities\"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
                {
                    """input_texts""": datasets.Value("""string""" ),
                } ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
        '''simple docstring'''
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                __lowerCAmelCase = """cuda"""
        else:
            __lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""

        __lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )

        __lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            __lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(__SCREAMING_SNAKE_CASE ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            __lowerCAmelCase = model.config.max_length - 1
        else:
            __lowerCAmelCase = model.config.max_length

        __lowerCAmelCase = tokenizer(
            __SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )

        __lowerCAmelCase = encodings["""input_ids"""]
        __lowerCAmelCase = encodings["""attention_mask"""]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ),2 )
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        __lowerCAmelCase = []
        __lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )

        for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
            __lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
            __lowerCAmelCase = encoded_texts[start_index:end_index]
            __lowerCAmelCase = attn_masks[start_index:end_index]

            if add_start_token:
                __lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
                __lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
                __lowerCAmelCase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )

            __lowerCAmelCase = encoded_batch

            with torch.no_grad():
                __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits

            __lowerCAmelCase = out_logits[..., :-1, :].contiguous()
            __lowerCAmelCase = labels[..., 1:].contiguous()
            __lowerCAmelCase = attn_mask[..., 1:].contiguous()

            __lowerCAmelCase = torch.expa(
                (loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
689
0
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class _UpperCAmelCase ( unittest.TestCase ):
    def lowerCAmelCase__ ( self : int ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowerCAmelCase__ ( self : str ):
        '''simple docstring'''
        torch.manual_seed(0 )
        lowercase_ : int = UNetaDModel(
            sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return model

    @property
    def lowerCAmelCase__ ( self : List[str] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        lowercase_ : int = UNetaDConditionModel(
            sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=1_0 , )
        return model

    @property
    def lowerCAmelCase__ ( self : List[str] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        lowercase_ : Tuple = AutoencoderKL(
            sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        lowercase_ : int = UNetaDModel(
            sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return vqvae, unet

    @slow
    def lowerCAmelCase__ ( self : Any ):
        '''simple docstring'''
        lowercase_ : str = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowercase_ : Tuple = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        lowercase_ : List[Any] = DDPMScheduler()
        lowercase_ : Union[str, Any] = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        lowercase_ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )

        lowercase_ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(4_2 )
        lowercase_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
        lowercase_ : Tuple = output.audios[0]
        lowercase_ : Optional[Any] = output.images[0]

        lowercase_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(4_2 )
        lowercase_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
        lowercase_ : List[str] = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        lowercase_ : Tuple = np.frombuffer(image.tobytes() , dtype="uint8" )[:1_0]
        lowercase_ : Optional[int] = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:1_0]
        lowercase_ : Optional[Any] = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0

        lowercase_ : Tuple = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        lowercase_ : List[str] = DDIMScheduler()
        lowercase_ : str = self.dummy_vqvae_and_unet
        lowercase_ : Optional[int] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        lowercase_ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )

        np.random.seed(0 )
        lowercase_ : Any = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        lowercase_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(4_2 )
        lowercase_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=1_0 )
        lowercase_ : str = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        lowercase_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype="uint8" )[:1_0]
        lowercase_ : Union[str, Any] = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0

        lowercase_ : Tuple = self.dummy_unet_condition
        lowercase_ : List[Any] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        lowercase_ : Optional[int] = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )

        np.random.seed(0 )
        lowercase_ : List[str] = torch.rand((1, 1, 1_0) )
        lowercase_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
        lowercase_ : Any = output.images[0]
        lowercase_ : Union[str, Any] = np.frombuffer(image.tobytes() , dtype="uint8" )[:1_0]
        lowercase_ : Union[str, Any] = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0


@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    def lowerCAmelCase__ ( self : List[Any] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__ ( self : List[str] ):
        '''simple docstring'''
        lowercase_ : Tuple = torch_device
        lowercase_ : List[Any] = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        lowercase_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )

        lowercase_ : Any = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(4_2 )
        lowercase_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE )
        lowercase_ : Optional[Any] = output.audios[0]
        lowercase_ : Optional[int] = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        lowercase_ : Optional[int] = np.frombuffer(image.tobytes() , dtype="uint8" )[:1_0]
        lowercase_ : str = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
620
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _UpperCAmelCase ( lowerCAmelCase_ ): a : Union[str, Any] =["""image_processor"""] a : Dict ="""SamImageProcessor""" def __init__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.image_processor __lowerCAmelCase = -10 __lowerCAmelCase = self.image_processor.size["""longest_edge"""] def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) # pop arguments that are not used in the foward but used nevertheless __lowerCAmelCase = encoding_image_processor["""original_sizes"""] if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor __lowerCAmelCase = original_sizes.numpy() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points( input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = self._normalize_and_convert( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",): '''simple docstring''' if input_points is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: __lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_labels is not None: __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE ) for box in input_boxes ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE ) for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default 
                __lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
                # boxes batch size of 1 by default
                __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )

        return encoding_image_processor

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        __lowerCAmelCase = max([point.shape[0] for point in input_points] )
        __lowerCAmelCase = []
        for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
            if point.shape[0] != expected_nb_points:
                __lowerCAmelCase = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
                __lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
            processed_input_points.append(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = processed_input_points
        return input_points, input_labels

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
        '''simple docstring'''
        __lowerCAmelCase , __lowerCAmelCase = original_size
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )

        if is_bounding_box:
            __lowerCAmelCase = coords.reshape(-1,2,2 )

        __lowerCAmelCase = coords[..., 0] * (new_w / old_w)
        __lowerCAmelCase = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            __lowerCAmelCase = coords.reshape(-1,4 )

        return coords

    def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):  # Checks for TF or Torch tensor
                __lowerCAmelCase = input_points.numpy().tolist()

            if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
        else:
            __lowerCAmelCase = None

        if input_labels is not None:
            if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
                __lowerCAmelCase = input_labels.numpy().tolist()

            if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
                raise ValueError("""Input labels must be a list of list integers.""" )
            __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
        else:
            __lowerCAmelCase = None

        if input_boxes is not None:
            if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
                __lowerCAmelCase = input_boxes.numpy().tolist()

            if (
                not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
                or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
                or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
        else:
            __lowerCAmelCase = None

        return input_points, input_labels, input_boxes

    @property
    def lowerCamelCase__ ( self ):
        '''simple docstring'''
        __lowerCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )

    def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
689
0
'''simple docstring'''
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    __magic_name__ : Union[str, Any] = BertJapaneseTokenizer
    __magic_name__ : Optional[Any] = False
    __magic_name__ : str = True

    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        super().setUp()
        a_ : List[Any] = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """こんにちは""",
            """こん""",
            """にちは""",
            """ばんは""",
            """##こん""",
            """##にちは""",
            """##ばんは""",
            """世界""",
            """##世界""",
            """、""",
            """##、""",
            """。""",
            """##。""",
        ]
        a_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def lowercase_ ( self : Optional[int] , lowercase__ : Dict ):
        '''simple docstring'''
        a_ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、世界。"""
        a_ : Optional[int] = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text

    def lowercase_ ( self : str , lowercase__ : Any ):
        '''simple docstring'''
        a_ , a_ : List[Any] = self.get_input_output_texts(__SCREAMING_SNAKE_CASE )
        a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
        a_ : List[str] = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
        return text, ids

    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        a_ : Optional[int] = self.tokenizer_class(self.vocab_file )

        a_ : Dict = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        a_ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE )

        a_ : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
        a_ : List[Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

        a_ : Any = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(__SCREAMING_SNAKE_CASE , """wb""" ) as handle:
            pickle.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        with open(__SCREAMING_SNAKE_CASE , """rb""" ) as handle:
            a_ : Tuple = pickle.load(__SCREAMING_SNAKE_CASE )

        a_ : Tuple = tokenizer_new.tokenize(__SCREAMING_SNAKE_CASE )

        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        a_ : List[Any] = MecabTokenizer(mecab_dic="""ipadic""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowercase_ ( self : str ):
        '''simple docstring'''
        try:
            a_ : Union[str, Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        try:
            a_ : str = MecabTokenizer(mecab_dic="""unidic""" )
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowercase_ ( self : str ):
        '''simple docstring'''
        a_ : Union[str, Any] = MecabTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , mecab_dic="""ipadic""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        try:
            a_ : Optional[int] = MecabTokenizer(
                do_lower_case=__SCREAMING_SNAKE_CASE , normalize_text=__SCREAMING_SNAKE_CASE , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        a_ : Dict = MecabTokenizer(normalize_text=__SCREAMING_SNAKE_CASE , mecab_dic="""ipadic""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )

    @require_sudachi
    def lowercase_ ( self : str ):
        '''simple docstring'''
        a_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE )

        a_ : Optional[int] = """こんにちは、世界。\nこんばんは、世界。"""
        a_ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

        a_ : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(__SCREAMING_SNAKE_CASE , """wb""" ) as handle:
            pickle.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        with open(__SCREAMING_SNAKE_CASE , """rb""" ) as handle:
            a_ : Union[str, Any] = pickle.load(__SCREAMING_SNAKE_CASE )

        a_ : int = tokenizer_new.tokenize(__SCREAMING_SNAKE_CASE )

        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    @require_sudachi
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        a_ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        a_ : List[Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )

        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )

    @require_sudachi
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        a_ : Any = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )

        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )

    @require_sudachi
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        a_ : Any = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )

        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )

    @require_sudachi
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        a_ : Dict = SudachiTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , sudachi_dict_type="""core""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def lowercase_ ( self : str ):
        '''simple docstring'''
        a_ : Tuple = SudachiTokenizer(normalize_text=__SCREAMING_SNAKE_CASE , sudachi_dict_type="""core""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )

    @require_sudachi
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        a_ : Dict = SudachiTokenizer(trim_whitespace=__SCREAMING_SNAKE_CASE , sudachi_dict_type="""core""" )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    @require_jumanpp
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        a_ : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE )

        a_ : Optional[int] = """こんにちは、世界。\nこんばんは、世界。"""
        a_ : str = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

        a_ : Tuple = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(__SCREAMING_SNAKE_CASE , """wb""" ) as handle:
            pickle.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        with open(__SCREAMING_SNAKE_CASE , """rb""" ) as handle:
            a_ : str = pickle.load(__SCREAMING_SNAKE_CASE )

        a_ : Tuple = tokenizer_new.tokenize(__SCREAMING_SNAKE_CASE )

        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    @require_jumanpp
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        a_ : List[Any] = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowercase_ ( self : int ):
        '''simple docstring'''
        a_ : List[Any] = JumanppTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        a_ : Union[str, Any] = JumanppTokenizer(normalize_text=__SCREAMING_SNAKE_CASE )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        a_ : Any = JumanppTokenizer(trim_whitespace=__SCREAMING_SNAKE_CASE )

        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )

    @require_jumanpp
    def lowercase_ ( self : int ):
        '''simple docstring'''
        a_ : str = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        a_ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]

        a_ : Dict = {}
        for i, token in enumerate(__SCREAMING_SNAKE_CASE ):
            a_ : Union[str, Any] = i
        a_ : str = WordpieceTokenizer(vocab=__SCREAMING_SNAKE_CASE , unk_token="""[UNK]""" )

        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )

        self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )

        self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )

        self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        a_ : Optional[int] = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )

        a_ : Dict = tokenizer.subword_tokenizer

        a_ : Tuple = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )

        a_ : int = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        a_ : List[Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )

        a_ : str = tokenizer.encode("""ありがとう。""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
        a_ : Optional[int] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__SCREAMING_SNAKE_CASE )

        a_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
        a_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    __magic_name__ : Tuple = BertJapaneseTokenizer
    __magic_name__ : List[Any] = False

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        super().setUp()
        a_ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]

        a_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def lowercase_ ( self : Union[str, Any] , **lowercase__ : Optional[int] ):
        '''simple docstring'''
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **__SCREAMING_SNAKE_CASE )

    def lowercase_ ( self : Union[str, Any] , lowercase__ : str ):
        '''simple docstring'''
        a_ : int = """こんにちは、世界。 \nこんばんは、世界。"""
        a_ : List[str] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowercase_ ( self : int ):
        '''simple docstring'''
        a_ : Any = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )

        a_ : Dict = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        a_ : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]

        a_ : List[str] = {}
        for i, token in enumerate(__SCREAMING_SNAKE_CASE ):
            a_ : Any = i
        a_ : Optional[int] = CharacterTokenizer(vocab=__SCREAMING_SNAKE_CASE , unk_token="""[UNK]""" )

        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )

        self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )

        self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        a_ : Dict = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )

        a_ : Any = tokenizer.encode("""ありがとう。""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
        a_ : Any = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__SCREAMING_SNAKE_CASE )

        a_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
        a_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        a_ : Optional[int] = """cl-tohoku/bert-base-japanese"""
        a_ : str = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
        self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )


class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        a_ : int = """cl-tohoku/bert-base-japanese"""
        with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
            BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from.""" ) )
        a_ : List[str] = """bert-base-cased"""
        with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
            BertJapaneseTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from.""" ) )
442
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") _a : int = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) a : int =field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} ) def lowerCamelCase__ ( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __lowerCAmelCase = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __lowerCAmelCase = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str =field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _lowerCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowercase ) datasets.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __lowerCAmelCase = data_args.train_file.split(""".""" )[-1] __lowerCAmelCase = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __lowerCAmelCase = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names __lowerCAmelCase = len(lowercase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __lowerCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , ) __lowerCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __lowerCAmelCase = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. __lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1} __lowerCAmelCase = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowercase ): # Tokenize the texts def _convert_table_text_to_pandas(lowercase ): __lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __lowerCAmelCase = examples["""statement"""] __lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase ) __lowerCAmelCase = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __lowerCAmelCase = raw_datasets.map( lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __lowerCAmelCase = raw_datasets["""train"""] if data_args.max_train_samples is not None: __lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __lowerCAmelCase = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __lowerCAmelCase = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __lowerCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowercase ) ) , 3 ): logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowercase ): __lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCAmelCase = default_data_collator elif training_args.fpaa: __lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 ) else: __lowerCAmelCase = None # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase ) __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase ) ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowercase ) trainer.save_metrics("""train""" , lowercase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase ) __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.log_metrics("""eval""" , lowercase ) trainer.save_metrics("""eval""" , lowercase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __lowerCAmelCase = predict_dataset.remove_columns("""label""" ) __lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) __lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(lowercase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowercase ): __lowerCAmelCase = label_list[item] writer.write(f'{index}\t{item}\n' ) __lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**lowercase ) else: trainer.create_model_card(**lowercase ) def _lowerCAmelCase ( lowercase ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
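# Illustration (not part of the script above): a minimal sketch of the TabFact table
# flattening done in _convert_table_text_to_pandas. The sample string and column
# names here are invented for the example; the "#" and "\n" delimiters are taken
# from the code above.
import pandas as pd

table_text = "city#population\nParis#2100000\nBerlin#3600000\n"
table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
table_pd = pd.DataFrame.from_records(table_content[1:], columns=table_content[0])
print(table_pd)  # two rows, columns: city, population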
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
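# Illustration: a stripped-down stand-in for the _LazyModule pattern above, using
# module-level __getattr__ (PEP 562). This is a sketch of the idea only, not
# transformers' actual _LazyModule implementation.
import importlib

_lazy_structure = {"configuration_convnext": ["ConvNextConfig"]}


def __getattr__(name):
    for module_name, exported in _lazy_structure.items():
        if name in exported:
            # The submodule is imported only on first attribute access.
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")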
'''simple docstring''' import os import sys import unittest _a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""") class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = find_backend(""" if not is_torch_available():""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __lowerCAmelCase = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""",objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" ) __lowerCAmelCase = create_dummy_object("""function""","""'torch'""" ) self.assertEqual( __SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) __lowerCAmelCase = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ __lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ __lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
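# Illustration: roughly what find_backend (exercised in the tests above) extracts
# from an init guard line -- the backend names inside `is_xxx_available()` calls,
# joined by "_and_". This is a simplified sketch, not the exact diffusers code.
import re


def find_backend_sketch(line: str):
    backends = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(backends) if backends else None


assert find_backend_sketch("    if not is_torch_available():") == "torch"
assert (
    find_backend_sketch("    if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)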
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
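# A few checks for is_balanced (invented inputs; results follow from the code above).
assert is_balanced("([]{})")
assert is_balanced("")
assert not is_balanced("([)]")
assert not is_balanced("((")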
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Return `decimal` as a (numerator, denominator) fraction in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm to reduce the fraction to lowest terms.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    try:
        print(f"{decimal_to_fraction('78td') = }")
    except ValueError:
        print("decimal_to_fraction('78td') raises ValueError, as expected")
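# Worked example for decimal_to_fraction: 6.25 has two fractional digits, so it
# first becomes 625/100; gcd(625, 100) = 25, giving (25, 4).
assert decimal_to_fraction(6.25) == (25, 4)
assert decimal_to_fraction("0.5") == (1, 2)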
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class __SCREAMING_SNAKE_CASE (nn.Module ): """simple docstring""" def __init__( self , UpperCamelCase__ = 16 , UpperCamelCase__ = 88 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 32 , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "geglu" , UpperCamelCase__ = None , ): """simple docstring""" super().__init__() a_ = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference a_ = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` a_ = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` a_ = [1, 0] def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = True , ): """simple docstring""" a_ = hidden_states a_ = [] a_ = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens a_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] a_ = self.transformer_index_for_condition[i] a_ = self.transformers[transformer_index]( __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] a_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) a_ = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE )
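# Sketch of the mixing step at the end of DualTransformer2DModel.forward above:
# the two transformers' residual outputs are blended with mix_ratio, then the
# input is added back. Shapes and values here are invented for illustration.
import torch

mix_ratio = 0.5
input_states = torch.zeros(1, 4)
encoded_states = [torch.ones(1, 4), torch.full((1, 4), 3.0)]
output_states = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
output_states = output_states + input_states
print(output_states)  # tensor of 2.0 everywhere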
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a : Dict = _symbol_database.Default() _a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) _a : str = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a : str = None _a : Union[str, Any] = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a : Optional[int] = 4_5 _a : List[Any] = 1_5_8_1 _a : str = 1_5_1_7 _a : Optional[Any] = 1_5_7_0 _a : List[str] = 1_5_8_4 _a : List[Any] = 1_7_9_3 _a : Union[str, Any] = 1_7_9_5 _a : Tuple = 1_9_1_6 _a : List[Any] = 1_8_6_4 _a : Any = 1_9_0_5 _a : Optional[Any] = 1_9_1_9 _a : Optional[int] = 2_4_2_9 _a : Tuple = 2_2_0_8 _a : Optional[Any] = 2_4_1_8 _a : List[Any] = 2_3_2_3 _a : str = 2_4_0_7 # @@protoc_insertion_point(module_scope)
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ): '''simple docstring''' try: __lowerCAmelCase = int(_UpperCamelCase ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) __lowerCAmelCase = 2 __lowerCAmelCase = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 __lowerCAmelCase = i while n % i == 0: __lowerCAmelCase = n // i i += 1 return int(_UpperCamelCase ) if __name__ == "__main__": print(f'''{solution() = }''')
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( lowerCAmelCase_ ): a : torch.FloatTensor class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = torch.nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) # down __lowerCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_down_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) self.down_blocks.append(__SCREAMING_SNAKE_CASE ) # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # out __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = 2 * out_channels if double_z else out_channels __lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = x __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(""">=""","""1.11.0""" ): for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: __lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE ) # post-process __lowerCAmelCase = 
self.conv_norm_out(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) __lowerCAmelCase = in_channels if norm_type == """spatial""" else None # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # up __lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = reversed_block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_up_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,) self.up_blocks.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output_channel # out if norm_type == "spatial": __lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' __lowerCAmelCase = z __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(""">=""","""1.11.0""" ): # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for 
up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ): '''simple docstring''' super().__init__() __lowerCAmelCase = n_e __lowerCAmelCase = vq_embed_dim __lowerCAmelCase = beta __lowerCAmelCase = legacy __lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e ) __lowerCAmelCase = remap if self.remap is not None: self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) ) __lowerCAmelCase = self.used.shape[0] __lowerCAmelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCAmelCase = self.re_embed __lowerCAmelCase = self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __lowerCAmelCase = n_e __lowerCAmelCase = sane_index_shape def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long() __lowerCAmelCase = match.argmax(-1 ) __lowerCAmelCase = match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device ) else: __lowerCAmelCase = self.unknown_index return new.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token __lowerCAmelCase = 0 # simply set to zero __lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE ) return back.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = z.permute(0,2,3,1 ).contiguous() __lowerCAmelCase = z.view(-1,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 ) __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape ) __lowerCAmelCase = None __lowerCAmelCase = None # compute loss for embedding if not self.legacy: __lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCAmelCase 
= torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowerCAmelCase = z + (z_q - z).detach() # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() if self.remap is not None: __lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis __lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten if self.sane_index_shape: __lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if self.remap is not None: __lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis __lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ) if shape is not None: __lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE ) # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() return z_q class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = parameters __lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 ) __lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 ) __lowerCAmelCase = deterministic __lowerCAmelCase = torch.exp(0.5 * self.logvar ) __lowerCAmelCase = torch.exp(self.logvar ) if self.deterministic: __lowerCAmelCase = __lowerCAmelCase = torch.zeros_like( self.mean,device=self.parameters.device,dtype=self.parameters.dtype ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' __lowerCAmelCase = randn_tensor( self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype ) __lowerCAmelCase = self.mean + self.std * sample return x def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,dim=[1, 2, 3],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __lowerCAmelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' return self.mean
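# Sketch of the reparameterization used in DiagonalGaussianDistribution.sample()
# above: draw eps ~ N(0, I) and shift/scale it, keeping the sampling
# differentiable w.r.t. mean and logvar. Tensor shapes are invented for the example.
import torch

mean = torch.zeros(1, 4)
logvar = torch.zeros(1, 4)  # var = 1
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(mean)
print(sample.shape)  # torch.Size([1, 4])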
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _lowerCAmelCase : __UpperCAmelCase : int __UpperCAmelCase : TreeNode | None = None __UpperCAmelCase : TreeNode | None = None __snake_case = namedtuple("""CoinsDistribResult""", """moves excess""") def __lowerCAmelCase ( lowercase : Optional[Any] ) -> int: """simple docstring""" if root is None: return 0 # Validation def count_nodes(lowercase : int ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(lowercase : str ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(lowercase ) != count_coins(lowercase ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(lowercase : Any ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) snake_case ,snake_case : Optional[Any] = get_distrib(node.left ) snake_case ,snake_case : Dict = get_distrib(node.right ) snake_case : Optional[Any] = 1 - left_distrib_excess snake_case : Dict = 1 - right_distrib_excess snake_case : Optional[Any] = ( left_distrib_moves + right_distrib_moves + abs(lowercase ) + abs(lowercase ) ) snake_case : int = node.data - coins_to_left - coins_to_right return CoinsDistribResult(lowercase , lowercase ) return get_distrib(lowercase )[0] if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features _a : Optional[int] = logging.get_logger(__name__) _a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) _a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} ) a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) a : int =field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : int =field( default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) a : int =field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) a : int =field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) a : float =field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class _UpperCAmelCase ( lowerCAmelCase_ ): a : Optional[Any] ="""train""" a : Optional[int] ="""dev""" class _UpperCAmelCase ( lowerCAmelCase_ ): a : SquadDataTrainingArguments a : List[SquadFeatures] a : Split a : bool def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",): '''simple docstring''' __lowerCAmelCase = args __lowerCAmelCase = is_language_sensitive __lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): try: __lowerCAmelCase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __lowerCAmelCase = mode # Load data features from cache or dataset file __lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1""" 
__lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowerCAmelCase = cached_features_file + """.lock""" with FileLock(__SCREAMING_SNAKE_CASE ): if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: __lowerCAmelCase = time.time() __lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __lowerCAmelCase = self.old_features["""features"""] __lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' """ future run""" ) else: if mode == Split.dev: __lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) else: __lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) __lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features( examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = self.features[i] __lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float ) __lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float ) __lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
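# Sketch of the lock-then-cache pattern used in the dataset __init__ above: a
# FileLock guards the cache file so only one distributed worker builds it, and
# the rest load the saved result. The path and the build step are placeholders.
import os

import torch
from filelock import FileLock

cache_file = "/tmp/example_cache.pt"
with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        payload = torch.load(cache_file)
    else:
        payload = {"features": [1, 2, 3]}  # stand-in for the expensive build
        torch.save(payload, cache_file)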
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class a ( lowerCAmelCase_ ): __lowerCAmelCase : List[Any] = """Wav2Vec2FeatureExtractor""" __lowerCAmelCase : int = """AutoTokenizer""" def __init__( self :int ,__lowercase :Optional[int] ,__lowercase :Any ): super().__init__(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = self.feature_extractor snake_case__ : Any = False @classmethod def __lowerCamelCase ( cls :Union[str, Any] ,__lowercase :Optional[Any] ,**__lowercase :List[str] ): try: return super().from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) except OSError: warnings.warn( F"""Loading a tokenizer inside {cls.__name__} from a config that does not""" ''' include a `tokenizer_class` attribute is deprecated and will be ''' '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`''' ''' attribute to either your `config.json` or `tokenizer_config.json` ''' '''file to suppress this warning: ''' ,__SCREAMING_SNAKE_CASE ,) snake_case__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) snake_case__ : int = WavaVecaCTCTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) return cls(feature_extractor=__SCREAMING_SNAKE_CASE ,tokenizer=__SCREAMING_SNAKE_CASE ) def __call__( self :List[Any] ,*__lowercase :List[str] ,**__lowercase :Optional[int] ): if self._in_target_context_manager: return self.current_processor(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. 
Use `audio` instead.''' ) snake_case__ : str = kwargs.pop('''raw_speech''' ) else: snake_case__ : Optional[int] = kwargs.pop('''audio''' ,__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = kwargs.pop('''sampling_rate''' ,__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = kwargs.pop('''text''' ,__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: snake_case__ : int = args[0] snake_case__ : str = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: snake_case__ : str = self.feature_extractor(__SCREAMING_SNAKE_CASE ,*__SCREAMING_SNAKE_CASE ,sampling_rate=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) if text is not None: snake_case__ : Tuple = self.tokenizer(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) if text is None: return inputs elif audio is None: return encodings else: snake_case__ : int = encodings['''input_ids'''] return inputs def __lowerCamelCase ( self :Union[str, Any] ,*__lowercase :List[str] ,**__lowercase :Optional[Any] ): if self._in_target_context_manager: return self.current_processor.pad(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) snake_case__ : str = kwargs.pop('''input_features''' ,__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = kwargs.pop('''labels''' ,__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: snake_case__ : Optional[int] = args[0] snake_case__ : Dict = args[1:] if input_features is not None: snake_case__ : int = self.feature_extractor.pad(__SCREAMING_SNAKE_CASE ,*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) if labels is not None: snake_case__ : Optional[Any] = self.tokenizer.pad(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) if labels is None: return input_features elif input_features is None: return labels else: snake_case__ : Optional[Any] = labels['''input_ids'''] return input_features def __lowerCamelCase ( self :Dict ,*__lowercase :List[str] ,**__lowercase :Optional[int] ): return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :Optional[int] ,*__lowercase :Optional[int] ,**__lowercase :int ): return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) @contextmanager def __lowerCamelCase ( self :Optional[Any] ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) snake_case__ : Optional[int] = True snake_case__ : Optional[Any] = self.tokenizer yield snake_case__ : Optional[int] = self.feature_extractor snake_case__ : Any = False
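# Hedged usage sketch for a processor of the kind defined above, loaded through
# the public transformers class. The checkpoint name is an assumption for
# illustration; any Wav2Vec2 CTC checkpoint would do.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16000, dtype=np.float32)  # one second of silence
inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids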
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _lowerCAmelCase ( lowercase ) -> Optional[Any]: # vision encoder if "img_encoder.pos_embed" in name: __lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: __lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: __lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: __lowerCAmelCase = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: __lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: __lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: __lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: __lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: __lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: __lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: __lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: __lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: __lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Dict: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] ) __lowerCAmelCase = config.vision_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase = int(key_split[3] ) __lowerCAmelCase = config.text_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[ dim : dim * 2, : ] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] else: __lowerCAmelCase = rename_key(lowercase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): __lowerCAmelCase = val.squeeze_() else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]: __lowerCAmelCase = GroupViTConfig() __lowerCAmelCase = GroupViTModel(lowercase ).eval() __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0) # verify result __lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) __lowerCAmelCase = prepare_img() __lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" ) with torch.no_grad(): __lowerCAmelCase = model(**lowercase ) if model_name == "groupvit-gcc-yfcc": __lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": __lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f'Model name {model_name} not supported.' 
) assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 ) processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) print("""Successfully saved processor and model to""" , lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowercase , organization="""nielsr""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _a : List[str] = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
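# Sketch of the fused-qkv split performed in convert_state_dict above: a single
# (3*dim, dim) projection weight is cut into query / key / value blocks.
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q_w, k_w, v_w = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)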
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
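# Known test vector: the Adler-32 checksum of "Wikipedia" is 0x11E60398.
assert adler32("Wikipedia") == 0x11E60398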
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) _a : Optional[int] = ["""model.decoder.embed_positions.weights"""] def _lowerCAmelCase ( lowercase ) -> Optional[Any]: if "emb" in name: __lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: __lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: __lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: __lowerCAmelCase = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: __lowerCAmelCase = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: __lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: __lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: __lowerCAmelCase = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]: __lowerCAmelCase = list(state_dict.keys() ) __lowerCAmelCase = {} for key in keys: __lowerCAmelCase = state_dict.pop(lowercase ) __lowerCAmelCase = rename_keys(lowercase ) if "in_proj_weight" in key: # split fused qkv proj __lowerCAmelCase = val[:hidden_size, :] __lowerCAmelCase = val[hidden_size : 2 * hidden_size, :] __lowerCAmelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCAmelCase = val else: __lowerCAmelCase = val return state_dict, enc_dec_proj_state_dict def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values __lowerCAmelCase = 1024 __lowerCAmelCase = 24 __lowerCAmelCase = 16 elif checkpoint == "medium": __lowerCAmelCase = 1536 __lowerCAmelCase = 48 __lowerCAmelCase = 24 elif checkpoint == "large": __lowerCAmelCase = 2048 __lowerCAmelCase = 48 __lowerCAmelCase = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) __lowerCAmelCase = MusicgenDecoderConfig( hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , ) return config @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]: __lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase ) __lowerCAmelCase = decoder_config_from_checkpoint(lowercase ) __lowerCAmelCase = fairseq_model.lm.state_dict() __lowerCAmelCase , __lowerCAmelCase = rename_state_dict( lowercase , hidden_size=decoder_config.hidden_size ) __lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" ) __lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) __lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase ) if len(lowercase ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model __lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase ) # check we can do a forward pass __lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCAmelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor __lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" ) __lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) __lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase ) # set the appropriate bos/pad token ids __lowerCAmelCase = 2048 __lowerCAmelCase = 2048 # set other default generation config params __lowerCAmelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCAmelCase = True __lowerCAmelCase = 3.0 if pytorch_dump_folder is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase ) processor.push_to_hub(lowercase ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) _a : List[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
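# Editor's note: a standalone sketch of the fused `in_proj_weight` split done
# in rename_state_dict above; rows [0, h), [h, 2h) and [2h, 3h) become the
# q, k and v projections (a tiny hidden_size is chosen for illustration):
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)  # the split is lossless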
689
0
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def _lowercase (SCREAMING_SNAKE_CASE = 100_0000 , SCREAMING_SNAKE_CASE = 10 ): '''simple docstring''' __A : Dict = defaultdict(SCREAMING_SNAKE_CASE ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A : Dict = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A : Tuple = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F"""{solution() = }""")
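# Editor's note: the counting key above is `outer_width**2 - hole_width**2`,
# the number of unit tiles in a hollow square lamina (the solver itself does
# not run as printed because both parameters were renamed to one identifier).
# A one-line illustration of that tile count:
def lamina_tiles(outer: int, hole: int) -> int:
    # outer and hole must share parity so the border has uniform thickness
    return outer * outer - hole * hole

assert lamina_tiles(8, 4) == 48  # an 8x8 square with a 4x4 hole uses 48 tiles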
111
'''simple docstring''' from collections import deque def _lowerCAmelCase ( lowercase ) -> Dict: __lowerCAmelCase = len(lowercase ) __lowerCAmelCase = deque() __lowerCAmelCase = [False for _ in range(lowercase )] __lowerCAmelCase = [-1 for _ in range(lowercase )] __lowerCAmelCase = index_of[:] def strong_connect(lowercase , lowercase , lowercase ): __lowerCAmelCase = index # the number when this node is seen __lowerCAmelCase = index # lowest rank node reachable from here index += 1 stack.append(lowercase ) __lowerCAmelCase = True for w in g[v]: if index_of[w] == -1: __lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase ) __lowerCAmelCase = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: __lowerCAmelCase = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: __lowerCAmelCase = [] __lowerCAmelCase = stack.pop() __lowerCAmelCase = False component.append(lowercase ) while w != v: __lowerCAmelCase = stack.pop() __lowerCAmelCase = False component.append(lowercase ) components.append(lowercase ) return index __lowerCAmelCase = [] for v in range(lowercase ): if index_of[v] == -1: strong_connect(lowercase , 0 , lowercase ) return components def _lowerCAmelCase ( lowercase , lowercase ) -> str: __lowerCAmelCase = [[] for _ in range(lowercase )] for u, v in edges: g[u].append(lowercase ) return g if __name__ == "__main__": # Test _a : Any = 7 _a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6] _a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5] _a : Optional[Any] = [(u, v) for u, v in zip(source, target)] _a : Optional[int] = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
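# Editor's note: a second usage sketch, assuming the two helpers above are
# exposed as `tarjan` and `create_graph` as the __main__ block implies (the
# def statements lost their names in extraction). Two disjoint 2-cycles yield
# two strongly connected components, each emitted later-visited vertex first:
g2 = create_graph(4, [(0, 1), (1, 0), (2, 3), (3, 2)])
assert tarjan(g2) == [[1, 0], [3, 2]]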
689
0
'''simple docstring''' def __UpperCAmelCase (lowercase__ ) -> int: '''simple docstring''' a_ = len(lowercase__ ) a_ = len(matrix[0] ) a_ = min(lowercase__ ,lowercase__ ) for row in range(lowercase__ ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 ,lowercase__ ): a_ = matrix[col][row] / matrix[row][row] for i in range(lowercase__ ,lowercase__ ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows a_ = True for i in range(row + 1 ,lowercase__ ): if matrix[i][row] != 0: a_ , a_ = matrix[i], matrix[row] a_ = False break if reduce: rank -= 1 for i in range(lowercase__ ): a_ = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
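# Editor's note: the elimination routine above computes the same quantity as
# numpy.linalg.matrix_rank; a cross-check on two small matrices (the printed
# code assigns its locals to a single throwaway name, so those bindings must
# be restored before comparing against it directly):
import numpy as np

assert np.linalg.matrix_rank([[1.0, 2.0], [3.0, 4.0]]) == 2  # independent rows
assert np.linalg.matrix_rank([[1.0, 2.0], [2.0, 4.0]]) == 1  # row 2 = 2 x row 1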
685
'''simple docstring''' from argparse import ArgumentParser from .env import EnvironmentCommand def _lowerCAmelCase ( ) -> Union[str, Any]: __lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" ) __lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" ) # Register commands EnvironmentCommand.register_subcommand(lowercase ) # Let's go __lowerCAmelCase = parser.parse_args() if not hasattr(lowercase , """func""" ): parser.print_help() exit(1 ) # Run __lowerCAmelCase = args.func(lowercase ) service.run() if __name__ == "__main__": main()
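# Editor's note: a minimal, dependency-free sketch of the subcommand dispatch
# pattern used above, where each command registers itself and stores a
# callable via set_defaults(func=...); all names here are the editor's own:
from argparse import ArgumentParser

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers(help="demo command helpers")
env_parser = subparsers.add_parser("env")
env_parser.set_defaults(func=lambda args: "env-report")
args = parser.parse_args(["env"])
assert hasattr(args, "func") and args.func(args) == "env-report"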
689
0
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): __lowerCamelCase: Union[str, Any] = DebertaVaTokenizer __lowerCamelCase: Optional[Any] = DebertaVaTokenizerFast __lowerCamelCase: Optional[int] = True __lowerCamelCase: Optional[int] = True def lowerCAmelCase__ ( self : List[str] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ : Optional[Any] = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self : Any , a : Tuple ): '''simple docstring''' lowercase_ : List[str] = "this is a test" lowercase_ : List[Any] = "this is a test" return input_text, output_text def lowerCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ : Union[str, Any] = "<pad>" lowercase_ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 3_0_0_0_1 ) def lowerCAmelCase__ ( self : Any ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def lowerCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ : Dict = " \tHeLLo!how \n Are yoU? " lowercase_ : Any = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on lowercase_ : Union[str, Any] = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : str = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def lowerCAmelCase__ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def lowerCAmelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCAmelCase__ ( self : Dict ): '''simple docstring''' lowercase_ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowercase_ : int = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on lowercase_ : int = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ : Any = "I was born in 92000, and this is falsé." lowercase_ : Tuple = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on lowercase_ : int = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ : List[str] = "I was born in 92000, and this is falsé." lowercase_ : List[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on lowercase_ : List[str] = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : List[Any] ): '''simple docstring''' lowercase_ : List[str] = "I was born in 92000, and this is falsé." 
lowercase_ : List[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on lowercase_ : Optional[int] = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[int] ): '''simple docstring''' lowercase_ : int = " \tHeLLo!how \n Are yoU? " lowercase_ : List[str] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on lowercase_ : int = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , split_by_punct=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : str ): '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : str = self.get_rust_tokenizer() lowercase_ : List[str] = "I was born in 92000, and this is falsé." 
lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) lowercase_ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : str = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = self.get_rust_tokenizer() lowercase_ : List[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE ) lowercase_ : int = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ : Dict = "This is a test" lowercase_ : Dict = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] lowercase_ : Tuple = ["▁", "T", "his", "▁is", "▁a", "▁test"] lowercase_ : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] lowercase_ : Union[str, Any] = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) lowercase_ : int = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = rust_tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # fmt: off lowercase_ : List[str] = "I was born in 92000, and this is falsé." 
lowercase_ : Tuple = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] lowercase_ : Optional[int] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] lowercase_ : Tuple = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on lowercase_ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase__ ( self : Tuple ): '''simple docstring''' lowercase_ : Optional[Any] = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.encode("sequence builders" ) lowercase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" ) lowercase_ : int = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __SCREAMING_SNAKE_CASE ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __SCREAMING_SNAKE_CASE , ) @slow def lowerCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' lowercase_ : List[Any] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
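# Editor's note: the "▁" (U+2581) prefix in the expected tokens above is the
# SentencePiece word-boundary marker; detokenization is just concatenation
# plus marker-to-space replacement. A dependency-free sketch:
tokens = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000"]
text = "".join(tokens).replace("\u2581", " ").strip()
assert text == "i was born in 92000"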
620
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _a : List[Any] = logging.get_logger(__name__) _a : int = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } _a : Any = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: for attribute in key.split(""".""" ): __lowerCAmelCase = getattr(lowercase , lowercase ) if weight_type is not None: __lowerCAmelCase = getattr(lowercase , lowercase ).shape else: __lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __lowerCAmelCase = value elif weight_type == "weight_g": __lowerCAmelCase = value elif weight_type == "weight_v": __lowerCAmelCase = value elif weight_type == "bias": __lowerCAmelCase = value else: __lowerCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: __lowerCAmelCase = [] __lowerCAmelCase = fairseq_model.state_dict() __lowerCAmelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue __lowerCAmelCase = True if "*" in mapped_key: __lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2] __lowerCAmelCase = mapped_key.replace("""*""" , lowercase ) if "weight_g" in name: __lowerCAmelCase = """weight_g""" elif "weight_v" in name: __lowerCAmelCase = """weight_v""" elif "bias" in name: __lowerCAmelCase = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase = """weight""" else: __lowerCAmelCase = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = full_name.split("""conv_layers.""" )[-1] __lowerCAmelCase = name.split(""".""" ) __lowerCAmelCase = int(items[0] ) __lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __lowerCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowercase ) @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict: if config_path is not None: __lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase ) else: __lowerCAmelCase = UniSpeechSatConfig() __lowerCAmelCase = """""" if is_finetuned: __lowerCAmelCase = UniSpeechSatForCTC(lowercase ) else: __lowerCAmelCase = UniSpeechSatForPreTraining(lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) __lowerCAmelCase = model[0].eval() recursively_load_weights(lowercase , lowercase ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": _a : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _a : Union[str, Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
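# Editor's note: set_recursively above walks a dotted key with repeated
# getattr before assigning the weight; an equivalent standalone sketch:
import functools

class _Namespace:  # stand-in for nested module objects
    pass

root = _Namespace()
root.encoder = _Namespace()
root.encoder.layer_norm = "weight-tensor-here"
pointer = functools.reduce(getattr, "encoder.layer_norm".split("."), root)
assert pointer == "weight-tensor-here"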
689
0
'''simple docstring''' from collections import deque from .hash_table import HashTable class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): def __init__( self : str , *lowercase__ : Dict , **lowercase__ : int ): '''simple docstring''' super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def lowercase_ ( self : str , lowercase__ : Optional[Any] , lowercase__ : Any ): '''simple docstring''' a_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__SCREAMING_SNAKE_CASE ) a_ : Optional[int] = self.values[key] def lowercase_ ( self : str ): '''simple docstring''' return ( sum(self.charge_factor - len(__SCREAMING_SNAKE_CASE ) for slot in self.values ) / self.size_table * self.charge_factor ) def lowercase_ ( self : Optional[Any] , lowercase__ : List[Any] , lowercase__ : int=None ): '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__SCREAMING_SNAKE_CASE ) == 0 ): return key return super()._collision_resolution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
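# Editor's note: the balanced-factor method above averages the free capacity
# (charge_factor minus deque length) over all slots; a standalone check of
# that arithmetic with editor-chosen numbers:
from collections import deque

charge_factor, size_table = 2, 3
values = [deque(), deque(["a", "b"]), deque(["c"])]
balanced = sum(charge_factor - len(slot) for slot in values) / size_table * charge_factor
assert balanced == 2.0  # (2 + 0 + 1) / 3 * 2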
442
'''simple docstring''' from scipy.stats import spearmanr import datasets _a : str = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ _a : Dict = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ _a : List[str] = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
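# Editor's note: the metric above is a thin wrapper over scipy.stats; a
# direct equivalent of the first docstring example, with the p-value
# unpacked as well:
from scipy.stats import spearmanr

rho, p_value = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
assert round(rho, 1) == -0.7  # matches the wrapper's {'spearmanr': -0.7}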
689
0
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int ): snake_case : List[str] = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): snake_case : str = n - k # Calculate C(n,k) for i in range(__lowerCamelCase ): result *= n - i result //= i + 1 return result def UpperCamelCase ( __lowerCamelCase : int ): return binomial_coefficient(2 * node_count , __lowerCamelCase ) // (node_count + 1) def UpperCamelCase ( __lowerCamelCase : Tuple ): if n < 0: raise ValueError("factorial() not defined for negative values" ) snake_case : str = 1 for i in range(1 , n + 1 ): result *= i return result def UpperCamelCase ( __lowerCamelCase : int ): return catalan_number(__lowerCamelCase ) * factorial(__lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( F'Given {node_count} nodes, there are {binary_tree_count(node_count)} ' F'binary trees and {catalan_number(node_count)} binary search trees.' )
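# Editor's note: the helpers above read `n` and `k` whose bindings were
# renamed away, so they do not run as printed; a compact cross-check of the
# quantities they compute, using math.comb (Python 3.8+, editor's names):
from math import comb, factorial

def catalan(n: int) -> int:
    return comb(2 * n, n) // (n + 1)  # C(2n, n) / (n + 1)

assert [catalan(n) for n in range(5)] == [1, 1, 2, 5, 14]  # binary search trees
assert catalan(3) * factorial(3) == 30  # labelled binary trees on 3 nodes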
204
'''simple docstring''' from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ): a : List[str] =["""onnx"""] def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' requires_backends(self,["""onnx"""] ) @classmethod def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' requires_backends(cls,["""onnx"""] ) @classmethod def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' requires_backends(cls,["""onnx"""] )
689
0
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE( self :str ) ->str: torch.manual_seed(0 ) lowercase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def SCREAMING_SNAKE_CASE( self :Tuple ) ->str: lowercase = self.dummy_uncond_unet lowercase = ScoreSdeVeScheduler() lowercase = ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowercase = torch.manual_seed(0 ) lowercase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=__SCREAMING_SNAKE_CASE ).images lowercase = torch.manual_seed(0 ) lowercase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[ 0 ] lowercase = image[0, -3:, -3:, -1] lowercase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE( self :int ) ->Tuple: lowercase = "google/ncsnpp-church-256" lowercase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) lowercase = ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE ) lowercase = ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) sde_ve.to(__SCREAMING_SNAKE_CASE ) sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowercase = torch.manual_seed(0 ) lowercase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=__SCREAMING_SNAKE_CASE ).images lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowercase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
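# Editor's note: the assertions above compare a 3x3 corner slice of the
# generated image against a hard-coded reference; a dependency-light sketch
# of that exact test idiom:
import numpy as np

image = np.zeros((1, 32, 32, 3))
image[0, -1, -1, -1] = 1.0
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 of the last channel
expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2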
441
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _a : int = logging.get_logger(__name__) _a : Optional[int] = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _UpperCAmelCase ( lowerCAmelCase_ ): a : List[str] ="""gptj""" a : Optional[int] ={ """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_embd __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = rotary_dim __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = use_cache __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__( bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ) class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
__lowerCAmelCase = 0 @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" ) __lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowerCAmelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_layer @property def lowerCamelCase__ ( self ): '''simple docstring''' return self._config.n_head def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,): '''simple docstring''' __lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() __lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowerCAmelCase = seqlen + 2 __lowerCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCAmelCase = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] __lowerCAmelCase = common_inputs["""attention_mask"""] if self.use_past: __lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype __lowerCAmelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 ) return ordered_inputs @property def lowerCamelCase__ ( self ): '''simple docstring''' return 13
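# Editor's note: a quick arithmetic check of the per-layer past_key_values
# shape built above, using the config defaults visible in this file
# (n_embd=4096, n_head=16) and an illustrative batch and sequence length:
batch, seqlen, n_head, n_embd = 2, 7, 16, 4096
past_shape = (batch, n_head, seqlen + 2, n_embd // n_head)
assert past_shape == (2, 16, 9, 256)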
689
0
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = ["""model.decoder.embed_positions.weights"""] def __UpperCamelCase ( lowercase_ : Optional[int] ): """simple docstring""" if "emb" in name: a_ = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: a_ = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: a_ = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: a_ = name.replace('linear1' , 'fc1' ) if "linear2" in name: a_ = name.replace('linear2' , 'fc2' ) if "norm1" in name: a_ = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: a_ = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: a_ = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: a_ = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: a_ = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: a_ = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def __UpperCamelCase ( lowercase_ : Tuple , lowercase_ : List[Any] ): """simple docstring""" a_ = list(state_dict.keys() ) a_ = {} for key in keys: a_ = state_dict.pop(lowercase_ ) a_ = rename_keys(lowercase_ ) if "in_proj_weight" in key: # split fused qkv proj a_ = val[:hidden_size, :] a_ = val[hidden_size : 2 * hidden_size, :] a_ = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: a_ = val else: a_ = val return state_dict, enc_dec_proj_state_dict def __UpperCamelCase ( lowercase_ : Any ): """simple docstring""" if checkpoint == "small": # default config values a_ = 1_024 a_ = 24 a_ = 16 elif checkpoint == "medium": a_ = 1_536 a_ = 48 a_ = 24 elif checkpoint == "large": a_ = 2_048 a_ = 48 a_ = 32 else: raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) a_ = MusicgenDecoderConfig( hidden_size=lowercase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase_ , num_attention_heads=lowercase_ , ) return config @torch.no_grad() def __UpperCamelCase ( lowercase_ : Any , lowercase_ : List[Any]=None , lowercase_ : str=None , lowercase_ : int="cpu" ): """simple docstring""" a_ = MusicGen.get_pretrained(lowercase_ , device=lowercase_ ) a_ = decoder_config_from_checkpoint(lowercase_ ) a_ = fairseq_model.lm.state_dict() a_ , a_ = rename_state_dict( lowercase_ , hidden_size=decoder_config.hidden_size ) a_ = TaEncoderModel.from_pretrained('t5-base' ) a_ = EncodecModel.from_pretrained('facebook/encodec_32khz' ) a_ = MusicgenForCausalLM(lowercase_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection a_ , a_ = decoder.load_state_dict(lowercase_ , strict=lowercase_ ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase_ ) if len(lowercase_ ) > 0: raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase_ ) > 0: raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model a_ = MusicgenForConditionalGeneration(text_encoder=lowercase_ , audio_encoder=lowercase_ , decoder=lowercase_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase_ ) # check we can do a forward pass a_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) a_ = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): a_ = model(input_ids=lowercase_ , decoder_input_ids=lowercase_ ).logits if logits.shape != (8, 1, 2_048): raise ValueError('Incorrect shape for logits' ) # now construct the processor a_ = AutoTokenizer.from_pretrained('t5-base' ) a_ = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) a_ = MusicgenProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ ) # set the appropriate bos/pad token ids a_ = 2_048 a_ = 2_048 # set other default generation config params a_ = int(30 * audio_encoder.config.frame_rate ) a_ = True a_ = 3.0 if pytorch_dump_folder is not None: Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase_ ) processor.save_pretrained(lowercase_ ) if repo_id: logger.info(F'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase_ ) processor.push_to_hub(lowercase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __lowerCAmelCase = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
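# Editor's note: the forward-pass smoke test above folds 8 token ids into a
# (2, 4) batch and then into (2 * 4, 1) decoder ids, where 4 appears to be
# the number of audio codebooks; a quick shape check:
import torch

input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
decoder_input_ids = input_ids.reshape(2 * 4, -1)
assert input_ids.shape == (2, 4) and decoder_input_ids.shape == (8, 1)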
536
'''simple docstring''' def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int: __lowerCAmelCase = set() __lowerCAmelCase = int((limit - 24) ** (1 / 2) ) __lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) ) for primea in primes: __lowerCAmelCase = primea * primea for primea in primes: __lowerCAmelCase = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: __lowerCAmelCase = primea * primea * primea * primea __lowerCAmelCase = square + cube + tetr if total >= limit: break ret.add(lowercase ) return len(lowercase ) if __name__ == "__main__": print(f'{solution() = }')
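# Editor's note: this solves Project Euler 87; per the problem statement,
# exactly four numbers below fifty are prime power triples. A brute-force
# cross-check of that small case:
small_primes = [2, 3, 5, 7]
triples = {p**2 + q**3 + r**4 for p in small_primes for q in small_primes for r in small_primes}
assert sorted(t for t in triples if t < 50) == [28, 33, 47, 49]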
689
0
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
636
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : Optional[int] =TextToVideoSDPipeline a : Optional[int] =TEXT_TO_IMAGE_PARAMS a : Any =TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. a : Union[str, Any] =frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,) __lowerCAmelCase = DDIMScheduler( beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,) __lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = """np""" __lowerCAmelCase = 
sd_pipe(**__SCREAMING_SNAKE_CASE ).frames __lowerCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",) def lowerCamelCase__ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def lowerCamelCase__ ( self ): '''simple docstring''' pass def lowerCamelCase__ ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) __lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __lowerCAmelCase = pipe.to("""cuda""" ) __lowerCAmelCase = """Spiderman is surfing""" __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames __lowerCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
689
0
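The slow tests in the row above drive the public text-to-video pipeline end to end. As a hedged usage sketch outside the test harness (it assumes a CUDA device, a diffusers build that ships TextToVideoSDPipeline, and network access to the damo-vilab/text-to-video-ms-1.7b checkpoint those tests download), the same call pattern looks like:

import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

# Mirrors the slow test above: swap in the multistep scheduler, move to GPU,
# seed a CPU generator for reproducibility, and request tensor frames.
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames
video = video_frames.cpu().numpy()  # tensor of video frames, as in the slow test above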
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __snake_case = logging.get_logger(__name__) class _lowerCAmelCase : __UpperCAmelCase : str __UpperCAmelCase : str = None @staticmethod def lowerCamelCase ( ) -> str: '''simple docstring''' raise NotImplementedError def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' raise NotImplementedError def lowerCamelCase ( self ) -> Optional[int]: '''simple docstring''' if not self.is_available(): raise RuntimeError( F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowerCamelCase ( cls ) -> List[Any]: '''simple docstring''' return F'`pip install {cls.pip_package or cls.name}`' class _lowerCAmelCase ( lowerCAmelCase_ ): __UpperCAmelCase : Optional[Any] = """optuna""" @staticmethod def lowerCamelCase ( ) -> List[str]: '''simple docstring''' return is_optuna_available() def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Any: '''simple docstring''' return run_hp_search_optuna(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' return default_hp_space_optuna(__SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( lowerCAmelCase_ ): __UpperCAmelCase : List[str] = """ray""" __UpperCAmelCase : List[str] = """'ray[tune]'""" @staticmethod def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' return is_ray_available() def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> str: '''simple docstring''' return run_hp_search_ray(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' return default_hp_space_ray(__SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( lowerCAmelCase_ ): __UpperCAmelCase : Optional[int] = """sigopt""" @staticmethod def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' return is_sigopt_available() def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> int: '''simple docstring''' return run_hp_search_sigopt(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' return default_hp_space_sigopt(__SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( lowerCAmelCase_ ): __UpperCAmelCase : Dict = """wandb""" @staticmethod def lowerCamelCase ( ) -> str: '''simple docstring''' return is_wandb_available() def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Any: '''simple docstring''' return run_hp_search_wandb(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def lowerCamelCase ( self , 
UpperCamelCase__ ) -> str: '''simple docstring''' return default_hp_space_wandb(__SCREAMING_SNAKE_CASE ) __snake_case = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __lowerCAmelCase ( ) -> str: """simple docstring""" snake_case : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase ) > 0: snake_case : Optional[Any] = available_backends[0].name if len(lowercase ) > 1: logger.info( F'{len(lowercase )} hyperparameter search backends available. Using {name} as the default.' ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
178
'''simple docstring'''
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """Run the module's accelerate offload hook before `method`, on accelerate >= 0.17.0 only."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    # The original obfuscated snippet parsed the decorated method here instead of
    # the version string, which would raise at import time; use the parsed version.
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
689
0
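The hyperparameter-search registry above ends by scanning the backends and picking the first available one. A minimal, self-contained sketch of that selection logic (the availability flags below are hypothetical stand-ins for the is_optuna_available() style probes):

def pick_backend(available):
    # `available` maps backend name -> bool, standing in for the is_*_available() checks.
    names = [name for name, ok in available.items() if ok]
    if len(names) > 0:
        if len(names) > 1:
            print(f"{len(names)} hyperparameter search backends available. Using {names[0]} as the default.")
        return names[0]
    raise RuntimeError("No hyperparameter search backend available.")

assert pick_backend({"optuna": True, "ray": True, "sigopt": False, "wandb": False}) == "optuna"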
def solution(limit: int = 1000000) -> int:
    """Sum of Euler's totient phi(d) for 2 <= d <= limit, computed with a prime sieve.

    The obfuscated original assigned every value to a placeholder name while the body
    still referenced `primes` and `phi`, and the `__main__` block still called
    `solution()`, so the real names are restored here.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
252
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: # load base model __lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __lowerCAmelCase = load_file(lowercase ) __lowerCAmelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) __lowerCAmelCase = pipeline.text_encoder else: __lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) __lowerCAmelCase = pipeline.unet # find the target layer __lowerCAmelCase = layer_infos.pop(0 ) while len(lowercase ) > -1: try: __lowerCAmelCase = curr_layer.__getattr__(lowercase ) if len(lowercase ) > 0: __lowerCAmelCase = layer_infos.pop(0 ) elif len(lowercase ) == 0: break except Exception: if len(lowercase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __lowerCAmelCase = layer_infos.pop(0 ) __lowerCAmelCase = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(lowercase ) else: pair_keys.append(lowercase ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 ) else: __lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa ) __lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ) # update visited list for item in pair_keys: visited.append(lowercase ) return pipeline if __name__ == "__main__": _a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") _a : Optional[int] = parser.parse_args() _a : Dict = args.base_model_path _a : Optional[Any] = args.checkpoint_path _a : Union[str, Any] = args.dump_path _a : Optional[int] = args.lora_prefix_unet _a : int = args.lora_prefix_text_encoder _a : str = args.alpha _a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _a : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
689
0
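The totient-sum routine above appears to be the reduced-proper-fractions count from Project Euler problem 72, whose statement gives 21 as the answer for limit = 8. That makes a cheap brute-force cross-check possible (illustrative test only, not part of the dataset row):

from math import gcd

def phi_bruteforce(n: int) -> int:
    # Count integers in [1, n] coprime to n, the definition of Euler's phi.
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

# For d <= 8 there are 21 reduced proper fractions n/d, matching the problem's worked example.
assert sum(phi_bruteforce(d) for d in range(2, 8 + 1)) == 21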
def hamming(n_element: int) -> list:
    """Return the first n_element numbers of the Hamming series 2^i * 3^j * 5^k."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
162
'''simple docstring'''
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
689
0
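Both routines in the row above encode the same rule: a string can be rearranged into a palindrome exactly when at most one character occurs an odd number of times. Two concrete cases, as a standalone check:

from collections import Counter

def can_rearrange(s: str) -> bool:
    # At most one odd character count is allowed (it becomes the palindrome's middle).
    return sum(count % 2 for count in Counter(s.replace(" ", "").lower()).values()) < 2

assert can_rearrange("carrace") is True   # rearranges to "racecar"
assert can_rearrange("hello") is False    # h, e and o all occur an odd number of times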
'''simple docstring'''
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # gradient of the normal at the reflection point on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) of the normal's angle, used to reflect the beam
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    incoming_gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, incoming_gradient = next_point(point_x, point_y, incoming_gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
111
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _lowerCAmelCase ( lowercase ) -> List[Any]: __lowerCAmelCase = VideoMAEConfig() set_architecture_configs(lowercase , lowercase ) if "finetuned" not in model_name: __lowerCAmelCase = False if "finetuned" in model_name: __lowerCAmelCase = """huggingface/label-files""" if "kinetics" in model_name: __lowerCAmelCase = 400 __lowerCAmelCase = """kinetics400-id2label.json""" elif "ssv2" in model_name: __lowerCAmelCase = 174 __lowerCAmelCase = """something-something-v2-id2label.json""" else: raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" ) __lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) ) __lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} return config def _lowerCAmelCase ( lowercase , lowercase ) -> Any: if "small" in model_name: __lowerCAmelCase = 384 __lowerCAmelCase = 1536 __lowerCAmelCase = 12 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 3 __lowerCAmelCase = 192 __lowerCAmelCase = 768 elif "large" in model_name: __lowerCAmelCase = 1024 __lowerCAmelCase = 4096 __lowerCAmelCase = 24 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 512 __lowerCAmelCase = 2048 elif "huge" in model_name: __lowerCAmelCase = 1280 __lowerCAmelCase = 5120 __lowerCAmelCase = 32 __lowerCAmelCase = 16 __lowerCAmelCase = 12 __lowerCAmelCase = 8 __lowerCAmelCase = 640 __lowerCAmelCase = 2560 elif "base" not in model_name: raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" ) def _lowerCAmelCase ( lowercase ) -> List[str]: if "encoder." 
in name: __lowerCAmelCase = name.replace("""encoder.""" , """""" ) if "cls_token" in name: __lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" ) if "decoder_pos_embed" in name: __lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" ) if "decoder.blocks" in name: __lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: __lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" ) if "attn.proj" in name: __lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "bias" not in name: __lowerCAmelCase = name.replace("""attn""" , """attention.self""" ) if "attn" in name: __lowerCAmelCase = name.replace("""attn""" , """attention.attention""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: __lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: __lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: __lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" ) if "head" in name and "decoder" not in name: __lowerCAmelCase = name.replace("""head""" , """classifier""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if key.startswith("""encoder.""" ): __lowerCAmelCase = key.replace("""encoder.""" , """""" ) if "qkv" in key: __lowerCAmelCase = key.split(""".""" ) if key.startswith("""decoder.blocks""" ): __lowerCAmelCase = config.decoder_hidden_size __lowerCAmelCase = int(key_split[2] ) __lowerCAmelCase = """decoder.decoder_layers.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = config.hidden_size __lowerCAmelCase = int(key_split[1] ) __lowerCAmelCase = """videomae.encoder.layer.""" if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __lowerCAmelCase = np.load(lowercase ) return list(lowercase ) def _lowerCAmelCase ( 
lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __lowerCAmelCase = get_videomae_config(lowercase ) if "finetuned" in model_name: __lowerCAmelCase = VideoMAEForVideoClassification(lowercase ) else: __lowerCAmelCase = VideoMAEForPreTraining(lowercase ) # download original checkpoint, hosted on Google Drive __lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(lowercase , lowercase , quiet=lowercase ) __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" ) if "model" in files: __lowerCAmelCase = files["""model"""] else: __lowerCAmelCase = files["""module"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) model.load_state_dict(lowercase ) model.eval() # verify model on basic input __lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __lowerCAmelCase = prepare_video() __lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" ) if "finetuned" not in model_name: __lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) __lowerCAmelCase = torch.load(lowercase ) __lowerCAmelCase = model(**lowercase ) __lowerCAmelCase = outputs.logits __lowerCAmelCase = [ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one __lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = 
torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": __lowerCAmelCase = torch.Size([1, 400] ) __lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": __lowerCAmelCase = torch.Size([1, 1408, 1536] ) __lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": __lowerCAmelCase = torch.Size([1, 174] ) __lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'Model name not supported. Should be one of {model_names}' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 ) else: print("""Logits:""" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 ) print("""Logits ok!""" ) # verify loss, if applicable if model_name == "videomae-base-short": __lowerCAmelCase = outputs.loss assert torch.allclose(lowercase , lowercase , atol=1e-4 ) print("""Loss ok!""" ) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : int = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
689
0
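The VideoMAE conversion above splits each fused qkv projection into separate query, key and value weights by slicing along the output dimension. A tiny self-contained check of that slicing invariant (toy sizes, illustration only):

import torch

dim = 4  # hidden size of a toy layer
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

# Same slices as convert_state_dict above: rows [0, dim) are the query weights,
# rows [dim, 2*dim) the key weights, and the last dim rows the value weights.
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)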
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive square-and-multiply modular exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Last `digits` digits of the hyperexponentiation (tetration) of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
685
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _a : Tuple = """\ """ _a : Tuple = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ _a : Optional[Any] = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": __lowerCAmelCase = """cuda""" else: __lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" __lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: __lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__SCREAMING_SNAKE_CASE ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" __lowerCAmelCase = model.config.max_length - 1 else: __lowerCAmelCase = model.config.max_length __lowerCAmelCase = tokenizer( __SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = encodings["""input_ids"""] __lowerCAmelCase = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." __lowerCAmelCase = [] __lowerCAmelCase = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ): __lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = encoded_texts[start_index:end_index] __lowerCAmelCase = attn_masks[start_index:end_index] if add_start_token: __lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 ) __lowerCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 ) __lowerCAmelCase = encoded_batch with torch.no_grad(): __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits __lowerCAmelCase = out_logits[..., :-1, :].contiguous() __lowerCAmelCase = labels[..., 1:].contiguous() __lowerCAmelCase = attn_mask[..., 1:].contiguous() __lowerCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
689
0
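The square-and-multiply recursion in the hyperexponentiation row above computes the same thing as Python's built-in three-argument pow, which gives a one-line correctness check. A self-contained sketch of the recursion plus that cross-check:

def modexp(base: int, exponent: int, modulo_value: int) -> int:
    # Square-and-multiply, mirroring _modexpt in the row above.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        half = modexp(base, exponent // 2, modulo_value) % modulo_value
        return (half * half) % modulo_value
    return (base * modexp(base, exponent - 1, modulo_value)) % modulo_value

# The built-in pow(base, exp, mod) performs the same modular exponentiation.
assert modexp(1777, 1855, 10**8) == pow(1777, 1855, 10**8)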
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    # The original discarded re.sub's return value, so the pegasus newline char
    # was never actually removed; keep the cleaned string.
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
620
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _UpperCAmelCase ( lowerCAmelCase_ ): a : Union[str, Any] =["""image_processor"""] a : Dict ="""SamImageProcessor""" def __init__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.image_processor __lowerCAmelCase = -10 __lowerCAmelCase = self.image_processor.size["""longest_edge"""] def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,): '''simple docstring''' __lowerCAmelCase = self.image_processor( __SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,) # pop arguments that are not used in the foward but used nevertheless __lowerCAmelCase = encoding_image_processor["""original_sizes"""] if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor __lowerCAmelCase = original_sizes.numpy() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points( input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = self._normalize_and_convert( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",): '''simple docstring''' if input_points is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: __lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_labels is not None: __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE ) for box in input_boxes ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE ) for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) ] __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE ) if input_boxes is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default 
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"""input_boxes""": input_boxes} ) if input_points is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"""input_points""": input_points} ) if input_labels is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"""input_labels""": input_labels} ) return encoding_image_processor def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = max([point.shape[0] for point in input_points] ) __lowerCAmelCase = [] for i, point in enumerate(__SCREAMING_SNAKE_CASE ): if point.shape[0] != expected_nb_points: __lowerCAmelCase = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 ) __lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] ) processed_input_points.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = processed_input_points return input_points, input_labels def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase = original_size __lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE ) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1,2,2 ) __lowerCAmelCase = coords[..., 0] * (new_w / old_w) __lowerCAmelCase = coords[..., 1] * (new_h / old_h) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1,4 ) return coords def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,): '''simple docstring''' if input_points is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor __lowerCAmelCase = input_points.numpy().tolist() if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ): raise ValueError("""Input points must be a list of list of floating points.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points] else: __lowerCAmelCase = None if input_labels is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): __lowerCAmelCase = 
input_labels.numpy().tolist() if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ): raise ValueError("""Input labels must be a list of list integers.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels] else: __lowerCAmelCase = None if input_boxes is not None: if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): __lowerCAmelCase = input_boxes.numpy().tolist() if ( not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE ) ): raise ValueError("""Input boxes must be a list of list of list of floating points.""" ) __lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes] else: __lowerCAmelCase = None return input_points, input_labels, input_boxes @property def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ): '''simple docstring''' return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
689
0
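The SAM processor above rescales prompt points from the original image frame into the resized frame whose longest edge matches the processor's target size. A standalone sketch of that arithmetic (the helper and its rounding are illustrative assumptions, not the library API):

import numpy as np

def rescale_points(points, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = np.asarray(points, dtype=np.float64).copy()
    out[..., 0] *= new_w / old_w  # x coordinates follow the width ratio
    out[..., 1] *= new_h / old_h  # y coordinates follow the height ratio
    return out

print(rescale_points([[256.0, 128.0]], (512, 512)))  # -> [[512. 256.]]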
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
442
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") _a : int = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) a : int =field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a : Optional[int] =field( default=lowerCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} ) def lowerCamelCase__ ( self ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __lowerCAmelCase = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __lowerCAmelCase = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a : Optional[str] =field( default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a : str =field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a : bool =field( default=lowerCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _lowerCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __lowerCAmelCase = training_args.get_process_log_level() logger.setLevel(lowercase ) datasets.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __lowerCAmelCase = data_args.train_file.split(""".""" )[-1] __lowerCAmelCase = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __lowerCAmelCase = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f'load a local file for {key}: {data_files[key]}' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names __lowerCAmelCase = len(lowercase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __lowerCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , ) __lowerCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __lowerCAmelCase = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. __lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1} __lowerCAmelCase = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(lowercase ): # Tokenize the texts def _convert_table_text_to_pandas(lowercase ): __lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __lowerCAmelCase = examples["""statement"""] __lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase ) __lowerCAmelCase = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __lowerCAmelCase = raw_datasets.map( lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __lowerCAmelCase = raw_datasets["""train"""] if data_args.max_train_samples is not None: __lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __lowerCAmelCase = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __lowerCAmelCase = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __lowerCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(lowercase ) ) , 3 ): logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowercase ): __lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCAmelCase = default_data_collator elif training_args.fpaa: __lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 ) else: __lowerCAmelCase = None # Initialize our Trainer __lowerCAmelCase = Trainer( model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: __lowerCAmelCase = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase = last_checkpoint __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase ) __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase ) ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowercase ) trainer.save_metrics("""train""" , lowercase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase ) __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase ) __lowerCAmelCase = min(lowercase , len(lowercase ) ) trainer.log_metrics("""eval""" , lowercase ) trainer.save_metrics("""eval""" , lowercase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __lowerCAmelCase = predict_dataset.remove_columns("""label""" ) __lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions __lowerCAmelCase = np.argmax(lowercase , axis=1 ) __lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(lowercase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowercase ): __lowerCAmelCase = label_list[item] writer.write(f'{index}\t{item}\n' ) __lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**lowercase ) else: trainer.create_model_card(**lowercase ) def _lowerCAmelCase ( lowercase ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
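Taken on its own, the compute_metrics callback above is just argmax accuracy over the two-class label map. A standalone sketch with made-up logits (the arrays are illustrative, not from the dataset):

import numpy as np

# Hypothetical logits for 4 statements over the classes {0: "Refused", 1: "Entailed"}.
preds = np.argmax(np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.3, 0.7]]), axis=1)
label_ids = np.array([0, 1, 1, 1])
# Mirrors the callback: compare argmax predictions to labels and average.
accuracy = (preds == label_ids).astype(np.float32).mean().item()
print(accuracy)  # 0.75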
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __lowerCamelCase = logging.getLogger(__name__) __lowerCamelCase = tf.data.AUTOTUNE def UpperCamelCase ( ): snake_case : int = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=__lowerCamelCase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=__lowerCamelCase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=__lowerCamelCase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=__lowerCamelCase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=__lowerCamelCase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=__lowerCamelCase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=__lowerCamelCase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=__lowerCamelCase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=__lowerCamelCase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=__lowerCamelCase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=__lowerCamelCase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=__lowerCamelCase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=__lowerCamelCase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=__lowerCamelCase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=__lowerCamelCase , help="Model ID to upload to on the Hugging Face Hub." 
) snake_case : Any = parser.parse_args() return args def UpperCamelCase ( __lowerCamelCase : Optional[int] ): try: if args.tpu_name: snake_case : Dict = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(__lowerCamelCase ) tf.tpu.experimental.initialize_tpu_system(__lowerCamelCase ) return tpu def UpperCamelCase ( __lowerCamelCase : List[Any] ): snake_case : Optional[int] = 0 for file in file_list: snake_case : Optional[Any] = file.split("/" )[-1] snake_case : Union[str, Any] = re.search(r"-\d+-(\d+)\.tfrecord" , __lowerCamelCase ).group(1 ) snake_case : Optional[int] = int(__lowerCamelCase ) num_samples += sample_count return num_samples def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Dict=None ): snake_case : List[Any] = count_samples(__lowerCamelCase ) snake_case : List[str] = tf.data.Dataset.from_tensor_slices(__lowerCamelCase ) if shuffle: snake_case : List[str] = dataset.shuffle(len(__lowerCamelCase ) ) snake_case : Tuple = tf.data.TFRecordDataset(__lowerCamelCase , num_parallel_reads=__lowerCamelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case : Union[str, Any] = dataset.apply(tf.data.experimental.assert_cardinality(__lowerCamelCase ) ) snake_case : Tuple = dataset.map(__lowerCamelCase , num_parallel_calls=__lowerCamelCase ) if shuffle: assert shuffle_buffer_size is not None snake_case : Tuple = dataset.shuffle(args.shuffle_buffer_size ) snake_case : Any = dataset.batch(__lowerCamelCase , drop_remainder=__lowerCamelCase ) snake_case : Union[str, Any] = dataset.map(__lowerCamelCase , num_parallel_calls=__lowerCamelCase ) snake_case : Tuple = dataset.prefetch(__lowerCamelCase ) return dataset def UpperCamelCase ( __lowerCamelCase : Dict ): if not args.no_tpu: snake_case : Union[str, Any] = initialize_tpu(__lowerCamelCase ) snake_case : Dict = tf.distribute.TPUStrategy(__lowerCamelCase ) else: snake_case : List[Any] = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) snake_case : List[str] = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case : Tuple = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case : int = tokenizer.vocab_size snake_case : List[Any] = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case : str = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case : Optional[Any] = count_samples(__lowerCamelCase ) snake_case : Tuple = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case : Tuple = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case : Optional[int] = TFAutoModelForMaskedLM.from_config(__lowerCamelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to 
ensure all the weights are built snake_case , snake_case : str = create_optimizer( num_train_steps=__lowerCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=__lowerCamelCase , metrics=["accuracy"] ) def decode_fn(__lowerCamelCase : Optional[Any] ): snake_case : Dict = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(__lowerCamelCase , __lowerCamelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case : Optional[int] = DataCollatorForLanguageModeling( tokenizer=__lowerCamelCase , mlm_probability=args.mlm_probability , mlm=__lowerCamelCase , return_tensors="tf" ) def mask_with_collator(__lowerCamelCase : str ): # TF really needs an isin() function snake_case : Dict = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) snake_case , snake_case : Union[str, Any] = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(__lowerCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__lowerCamelCase , ) return batch snake_case : int = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case : List[Any] = prepare_dataset( __lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case : List[Any] = prepare_dataset( __lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , ) snake_case : Tuple = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__lowerCamelCase ) ) model.fit( __lowerCamelCase , validation_data=__lowerCamelCase , epochs=args.num_epochs , callbacks=__lowerCamelCase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __lowerCamelCase = parse_args() main(args)
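The count_samples helper above depends on a shard-naming convention in which the trailing number of each filename encodes its record count. A small sketch of that parsing, with hypothetical filenames:

import re

# "<prefix>-<shard>-<num_samples>.tfrecord": the trailing number is the sample count.
file_list = ["gs://bucket/wikitext-0-4096.tfrecord", "gs://bucket/wikitext-1-4096.tfrecord"]
num_samples = 0
for file in file_list:
    file_name = file.split("/")[-1]
    sample_count = int(re.search(r"-\d+-(\d+)\.tfrecord", file_name).group(1))
    num_samples += sample_count
print(num_samples)  # 8192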
'''simple docstring''' import os import sys import unittest _a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""") class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = find_backend(""" if not is_torch_available():""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __lowerCAmelCase = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE ) self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""",objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" ) __lowerCAmelCase = create_dummy_object("""function""","""'torch'""" ) self.assertEqual( __SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) __lowerCAmelCase = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ __lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" ) self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ __lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
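For orientation, a usage sketch of the two helpers exercised above (assumes check_dummies is importable, i.e. the utils directory is on sys.path as in this test):

from check_dummies import create_dummy_object, find_backend

print(find_backend("    if not is_torch_available():"))  # "torch"
print(create_dummy_object("CONSTANT", "'torch'"))        # "\nCONSTANT = None\n"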
def odd_even_transposition(arr):
    """Sort arr in place with odd-even transposition (a parallel-friendly bubble sort)."""
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even- and odd-indexed neighbor pairs on each pass.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
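The even/odd alternation is what makes this sort easy to run on parallel hardware; executed sequentially it still costs O(n^2) comparisons, like bubble sort. A few quick checks against the function above:

assert odd_even_transposition([]) == []
assert odd_even_transposition([1]) == [1]
assert odd_even_transposition([3, 1, 2, 1]) == [1, 1, 2, 3]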
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Return decimal as a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: find the greatest common divisor.
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError
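The while-loop above is Euclid's GCD algorithm, so the result matches reducing the scaled fraction by its greatest common divisor. A sanity check:

from math import gcd

numerator, denominator = 625, 100  # 6.25 scaled by 10**2
g = gcd(numerator, denominator)    # 25
assert decimal_to_fraction(6.25) == (numerator // g, denominator // g)  # (25, 4)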
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
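A usage sketch for the lazy module above (the import path is an assumption based on the usual transformers layout):

# Resolves through the _LazyModule indirection above: configuration_gpt_neo is only
# imported on this first attribute access, not when the package itself is imported.
from transformers.models.gpt_neo import GPTNeoConfig

config = GPTNeoConfig()
print(config.model_type)  # "gpt_neo"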
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a : Dict = _symbol_database.Default() _a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) _a : str = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a : str = None _a : Union[str, Any] = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a : Optional[int] = 4_5 _a : List[Any] = 1_5_8_1 _a : str = 1_5_1_7 _a : Optional[Any] = 1_5_7_0 _a : List[str] = 1_5_8_4 _a : List[Any] = 1_7_9_3 _a : Union[str, Any] = 1_7_9_5 _a : Tuple = 1_9_1_6 _a : List[Any] = 1_8_6_4 _a : Any = 1_9_0_5 _a : Optional[Any] = 1_9_1_9 _a : Optional[int] = 2_4_2_9 _a : Tuple = 2_2_0_8 _a : Optional[Any] = 2_4_1_8 _a : List[Any] = 2_3_2_3 _a : str = 2_4_0_7 # @@protoc_insertion_point(module_scope)
"""simple docstring""" import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class _UpperCamelCase ( lowerCAmelCase_ ): '''simple docstring''' def __get__( self , __a , __a=None ): if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) __lowerCAmelCase = "__cached_" + self.fget.__name__ __lowerCAmelCase = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if cached is None: __lowerCAmelCase = self.fget(__SCREAMING_SNAKE_CASE ) setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return cached def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"invalid truth value {val!r}" ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if is_torch_fx_proxy(_UpperCamelCase ): return True if is_torch_available(): import torch if isinstance(_UpperCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_UpperCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_UpperCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(_UpperCamelCase , np.ndarray ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return isinstance(_UpperCamelCase , np.ndarray ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return _is_numpy(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' import torch return isinstance(_UpperCamelCase , torch.Tensor ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' import torch return isinstance(_UpperCamelCase , torch.device ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_device(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' import torch if isinstance(_UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase = getattr(_UpperCamelCase , _UpperCamelCase ) else: return False return isinstance(_UpperCamelCase , torch.dtype ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' import tensorflow as tf return isinstance(_UpperCamelCase , tf.Tensor ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tensorflow(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_UpperCamelCase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(_UpperCamelCase ) return type(_UpperCamelCase ) == tf.Tensor def 
_lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(_UpperCamelCase , jnp.ndarray ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return False if not is_flax_available() else _is_jax(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if isinstance(_UpperCamelCase , (dict, UserDict) ): return {k: to_py_obj(_UpperCamelCase ) for k, v in obj.items()} elif isinstance(_UpperCamelCase , (list, tuple) ): return [to_py_obj(_UpperCamelCase ) for o in obj] elif is_tf_tensor(_UpperCamelCase ): return obj.numpy().tolist() elif is_torch_tensor(_UpperCamelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(_UpperCamelCase ): return np.asarray(_UpperCamelCase ).tolist() elif isinstance(_UpperCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if isinstance(_UpperCamelCase , (dict, UserDict) ): return {k: to_numpy(_UpperCamelCase ) for k, v in obj.items()} elif isinstance(_UpperCamelCase , (list, tuple) ): return np.array(_UpperCamelCase ) elif is_tf_tensor(_UpperCamelCase ): return obj.numpy() elif is_torch_tensor(_UpperCamelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(_UpperCamelCase ): return np.asarray(_UpperCamelCase ) else: return obj class _UpperCamelCase ( lowerCAmelCase_ ): '''simple docstring''' def snake_case ( self ): __lowerCAmelCase = fields(self ) # Safety and consistency checks if not len(__SCREAMING_SNAKE_CASE ): raise ValueError(f"{self.__class__.__name__} has no fields." ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"{self.__class__.__name__} should not have more than one required field." ) __lowerCAmelCase = getattr(self , class_fields[0].name ) __lowerCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __lowerCAmelCase = first_field.items() __lowerCAmelCase = True else: try: __lowerCAmelCase = iter(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = True except TypeError: __lowerCAmelCase = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__SCREAMING_SNAKE_CASE ): if ( not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) or not len(__SCREAMING_SNAKE_CASE ) == 2 or not isinstance(element[0] , __SCREAMING_SNAKE_CASE ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute __lowerCAmelCase = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." ) break setattr(self , element[0] , element[1] ) if element[1] is not None: __lowerCAmelCase = element[1] elif first_field is not None: __lowerCAmelCase = first_field else: for field in class_fields: __lowerCAmelCase = getattr(self , field.name ) if v is not None: __lowerCAmelCase = v def __delitem__( self , *__a , **__a ): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." 
) def snake_case ( self , *__a , **__a ): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." ) def snake_case ( self , *__a , **__a ): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." ) def snake_case ( self , *__a , **__a ): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." ) def __getitem__( self , __a ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __lowerCAmelCase = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , __a , __a ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __setitem__( self , __a , __a ): super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def snake_case ( self ): return tuple(self[k] for k in self.keys() ) class _UpperCamelCase ( lowerCAmelCase_ ,lowerCAmelCase_ ): '''simple docstring''' @classmethod def snake_case ( cls , __a ): raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" ) class _UpperCamelCase ( lowerCAmelCase_ ): '''simple docstring''' __UpperCAmelCase : Tuple ="""longest""" __UpperCAmelCase : Union[str, Any] ="""max_length""" __UpperCAmelCase : Any ="""do_not_pad""" class _UpperCamelCase ( lowerCAmelCase_ ): '''simple docstring''' __UpperCAmelCase : Tuple ="""pt""" __UpperCAmelCase : Tuple ="""tf""" __UpperCAmelCase : Union[str, Any] ="""np""" __UpperCAmelCase : Any ="""jax""" class _UpperCamelCase : '''simple docstring''' def __init__( self , __a ): __lowerCAmelCase = context_managers __lowerCAmelCase = ExitStack() def __enter__( self ): for context_manager in self.context_managers: self.stack.enter_context(__SCREAMING_SNAKE_CASE ) def __exit__( self , *__a , **__a ): self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = infer_framework(_UpperCamelCase ) if framework == "tf": __lowerCAmelCase = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __lowerCAmelCase = inspect.signature(model_class.forward ) # PyTorch models else: __lowerCAmelCase = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = model_class.__name__ __lowerCAmelCase = infer_framework(_UpperCamelCase ) if framework == "tf": __lowerCAmelCase = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __lowerCAmelCase = inspect.signature(model_class.forward ) # PyTorch models else: __lowerCAmelCase = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = "" , _UpperCamelCase = "." ): '''simple docstring''' def _flatten_dict(_UpperCamelCase , _UpperCamelCase="" , _UpperCamelCase="." 
): for k, v in d.items(): __lowerCAmelCase = str(_UpperCamelCase ) + delimiter + str(_UpperCamelCase ) if parent_key else k if v and isinstance(_UpperCamelCase , _UpperCamelCase ): yield from flatten_dict(_UpperCamelCase , _UpperCamelCase , delimiter=_UpperCamelCase ).items() else: yield key, v return dict(_flatten_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ) @contextmanager def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = False ): '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=None ): '''simple docstring''' if is_numpy_array(_UpperCamelCase ): return np.transpose(_UpperCamelCase , axes=_UpperCamelCase ) elif is_torch_tensor(_UpperCamelCase ): return array.T if axes is None else array.permute(*_UpperCamelCase ) elif is_tf_tensor(_UpperCamelCase ): import tensorflow as tf return tf.transpose(_UpperCamelCase , perm=_UpperCamelCase ) elif is_jax_tensor(_UpperCamelCase ): return jnp.transpose(_UpperCamelCase , axes=_UpperCamelCase ) else: raise ValueError(f"Type not supported for transpose: {type(_UpperCamelCase )}." ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if is_numpy_array(_UpperCamelCase ): return np.reshape(_UpperCamelCase , _UpperCamelCase ) elif is_torch_tensor(_UpperCamelCase ): return array.reshape(*_UpperCamelCase ) elif is_tf_tensor(_UpperCamelCase ): import tensorflow as tf return tf.reshape(_UpperCamelCase , _UpperCamelCase ) elif is_jax_tensor(_UpperCamelCase ): return jnp.reshape(_UpperCamelCase , _UpperCamelCase ) else: raise ValueError(f"Type not supported for reshape: {type(_UpperCamelCase )}." ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=None ): '''simple docstring''' if is_numpy_array(_UpperCamelCase ): return np.squeeze(_UpperCamelCase , axis=_UpperCamelCase ) elif is_torch_tensor(_UpperCamelCase ): return array.squeeze() if axis is None else array.squeeze(dim=_UpperCamelCase ) elif is_tf_tensor(_UpperCamelCase ): import tensorflow as tf return tf.squeeze(_UpperCamelCase , axis=_UpperCamelCase ) elif is_jax_tensor(_UpperCamelCase ): return jnp.squeeze(_UpperCamelCase , axis=_UpperCamelCase ) else: raise ValueError(f"Type not supported for squeeze: {type(_UpperCamelCase )}." ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if is_numpy_array(_UpperCamelCase ): return np.expand_dims(_UpperCamelCase , _UpperCamelCase ) elif is_torch_tensor(_UpperCamelCase ): return array.unsqueeze(dim=_UpperCamelCase ) elif is_tf_tensor(_UpperCamelCase ): import tensorflow as tf return tf.expand_dims(_UpperCamelCase , axis=_UpperCamelCase ) elif is_jax_tensor(_UpperCamelCase ): return jnp.expand_dims(_UpperCamelCase , axis=_UpperCamelCase ) else: raise ValueError(f"Type not supported for expand_dims: {type(_UpperCamelCase )}." ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if is_numpy_array(_UpperCamelCase ): return np.size(_UpperCamelCase ) elif is_torch_tensor(_UpperCamelCase ): return array.numel() elif is_tf_tensor(_UpperCamelCase ): import tensorflow as tf return tf.size(_UpperCamelCase ) elif is_jax_tensor(_UpperCamelCase ): return array.size else: raise ValueError(f"Type not supported for expand_dims: {type(_UpperCamelCase )}." 
) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' for key, value in auto_map.items(): if isinstance(_UpperCamelCase , (tuple, list) ): __lowerCAmelCase = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: __lowerCAmelCase = f"{repo_id}--{value}" return auto_map def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' for base_class in inspect.getmro(_UpperCamelCase ): __lowerCAmelCase = base_class.__module__ __lowerCAmelCase = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"Could not infer framework from class {model_class}." )
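The flatten_dict helper earlier in this file walks nested mappings with a recursive generator, joining keys with a delimiter. A standalone sketch of the same logic:

def flatten_dict(d, parent_key="", delimiter="."):
    def _flatten(d, parent_key):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else str(k)
            if v and isinstance(v, dict):
                # Recurse into non-empty sub-dicts, carrying the joined key prefix.
                yield from _flatten(v, key)
            else:
                yield key, v
    return dict(_flatten(d, parent_key))

print(flatten_dict({"model": {"layers": 12, "heads": 8}, "lr": 3e-4}))
# {'model.layers': 12, 'model.heads': 8, 'lr': 0.0003}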
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( lowerCAmelCase_ ): a : torch.FloatTensor class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = torch.nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) # down __lowerCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_down_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) self.down_blocks.append(__SCREAMING_SNAKE_CASE ) # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # out __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = 2 * out_channels if double_z else out_channels __lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = x __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(""">=""","""1.11.0""" ): for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: __lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE ) # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE ) # post-process __lowerCAmelCase = 
self.conv_norm_out(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",): '''simple docstring''' super().__init__() __lowerCAmelCase = layers_per_block __lowerCAmelCase = nn.Convad( __SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,) __lowerCAmelCase = None __lowerCAmelCase = nn.ModuleList([] ) __lowerCAmelCase = in_channels if norm_type == """spatial""" else None # mid __lowerCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,) # up __lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase = output_channel __lowerCAmelCase = reversed_block_out_channels[i] __lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1 __lowerCAmelCase = get_up_block( __SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,) self.up_blocks.append(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output_channel # out if norm_type == "spatial": __lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 ) __lowerCAmelCase = False def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' __lowerCAmelCase = z __lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__SCREAMING_SNAKE_CASE ): def custom_forward(*__SCREAMING_SNAKE_CASE ): return module(*__SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(""">=""","""1.11.0""" ): # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for 
up_block in self.up_blocks: __lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else: # middle __lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: __lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ): '''simple docstring''' super().__init__() __lowerCAmelCase = n_e __lowerCAmelCase = vq_embed_dim __lowerCAmelCase = beta __lowerCAmelCase = legacy __lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e ) __lowerCAmelCase = remap if self.remap is not None: self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) ) __lowerCAmelCase = self.used.shape[0] __lowerCAmelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCAmelCase = self.re_embed __lowerCAmelCase = self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __lowerCAmelCase = n_e __lowerCAmelCase = sane_index_shape def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long() __lowerCAmelCase = match.argmax(-1 ) __lowerCAmelCase = match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device ) else: __lowerCAmelCase = self.unknown_index return new.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = inds.shape assert len(__SCREAMING_SNAKE_CASE ) > 1 __lowerCAmelCase = inds.reshape(ishape[0],-1 ) __lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token __lowerCAmelCase = 0 # simply set to zero __lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE ) return back.reshape(__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = z.permute(0,2,3,1 ).contiguous() __lowerCAmelCase = z.view(-1,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 ) __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape ) __lowerCAmelCase = None __lowerCAmelCase = None # compute loss for embedding if not self.legacy: __lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCAmelCase 
= torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowerCAmelCase = z + (z_q - z).detach() # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() if self.remap is not None: __lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis __lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten if self.sane_index_shape: __lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): '''simple docstring''' if self.remap is not None: __lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis __lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ) if shape is not None: __lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE ) # reshape back to match original input shape __lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous() return z_q class _UpperCAmelCase ( lowerCAmelCase_ ): def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __lowerCAmelCase = parameters __lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 ) __lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 ) __lowerCAmelCase = deterministic __lowerCAmelCase = torch.exp(0.5 * self.logvar ) __lowerCAmelCase = torch.exp(self.logvar ) if self.deterministic: __lowerCAmelCase = __lowerCAmelCase = torch.zeros_like( self.mean,device=self.parameters.device,dtype=self.parameters.dtype ) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ): '''simple docstring''' __lowerCAmelCase = randn_tensor( self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype ) __lowerCAmelCase = self.mean + self.std * sample return x def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,dim=[1, 2, 3],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __lowerCAmelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): '''simple docstring''' return self.mean
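The distribution class at the end of this file implements the reparameterization trick: the encoder output is split channel-wise into mean and log-variance, and sampling is mean + std * noise, which keeps the sample differentiable with respect to the parameters. A minimal sketch (shapes are illustrative):

import torch

parameters = torch.randn(1, 8, 4, 4)          # e.g. 2 * latent_channels from the encoder
mean, logvar = torch.chunk(parameters, 2, dim=1)
logvar = torch.clamp(logvar, -30.0, 20.0)     # same clamp as in the class
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(std)   # matches self.mean + self.std * sample
print(sample.shape)  # torch.Size([1, 4, 4, 4])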
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
178
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features _a : Optional[int] = logging.get_logger(__name__) _a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) _a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} ) a : str =field( default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) a : int =field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a : int =field( default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) a : int =field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) a : int =field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a : bool =field( default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) a : float =field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) a : int =field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class _UpperCAmelCase ( lowerCAmelCase_ ): a : Optional[Any] ="""train""" a : Optional[int] ="""dev""" class _UpperCAmelCase ( lowerCAmelCase_ ): a : SquadDataTrainingArguments a : List[SquadFeatures] a : Split a : bool def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",): '''simple docstring''' __lowerCAmelCase = args __lowerCAmelCase = is_language_sensitive __lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): try: __lowerCAmelCase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __lowerCAmelCase = mode # Load data features from cache or dataset file __lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1""" 
__lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowerCAmelCase = cached_features_file + """.lock""" with FileLock(__SCREAMING_SNAKE_CASE ): if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache: __lowerCAmelCase = time.time() __lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __lowerCAmelCase = self.old_features["""features"""] __lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' """ future run""" ) else: if mode == Split.dev: __lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) else: __lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) __lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features( examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self,__SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCAmelCase = self.features[i] __lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float ) __lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float ) __lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long ) __lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
689
0
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def _lowerCAmelCase ( __lowerCAmelCase ) -> Tuple: """simple docstring""" if isinstance(__lowerCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class a : def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :int ,__lowercase :Tuple ): pass def __lowerCamelCase ( self :Optional[int] ): pass def __lowerCamelCase ( self :Optional[Any] ): pass def __lowerCamelCase ( self :str ,__lowercase :int ,__lowercase :Any ,__lowercase :Optional[Any] ): snake_case__ : Any = np.abs((a - b) ).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,F"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __lowerCamelCase ( self :Tuple ,__lowercase :Optional[int] ,__lowercase :str ,__lowercase :Optional[int] ,__lowercase :Optional[int] ,__lowercase :str=None ,**__lowercase :Optional[int] ): snake_case__ : int = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) snake_case__ : str = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = model(input_ids=__SCREAMING_SNAKE_CASE ,pixel_values=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) ) def __lowerCamelCase ( self :str ,__lowercase :List[Any] ,__lowercase :Any ,__lowercase :Optional[Any] ,__lowercase :int ,__lowercase :Any=None ,**__lowercase :str ): snake_case__ , snake_case__ : Union[str, Any] = self.get_vision_text_model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) snake_case__ : str = {'''vision_model''': vision_model, '''text_model''': text_model} snake_case__ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] = model(input_ids=__SCREAMING_SNAKE_CASE ,pixel_values=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) ) def __lowerCamelCase ( self :List[Any] ,__lowercase :List[str] ,__lowercase :int ,__lowercase :Optional[Any] ,__lowercase :Dict ,__lowercase :Any=None ,**__lowercase :List[Any] ): snake_case__ , snake_case__ : str = self.get_vision_text_model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) 
snake_case__ : str = {'''vision_model''': vision_model, '''text_model''': text_model} snake_case__ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE ) snake_case__ : int = model(input_ids=__SCREAMING_SNAKE_CASE ,pixel_values=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ) snake_case__ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ) snake_case__ : int = model(input_ids=__SCREAMING_SNAKE_CASE ,pixel_values=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ) snake_case__ : Any = after_output[0] snake_case__ : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE ,1e-3 ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :Dict ,__lowercase :int ,__lowercase :Optional[Any] ,__lowercase :Union[str, Any] ,__lowercase :str=None ,**__lowercase :int ): snake_case__ , snake_case__ : List[Any] = self.get_vision_text_model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) snake_case__ : int = {'''vision_model''': vision_model, '''text_model''': text_model} snake_case__ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = model( input_ids=__SCREAMING_SNAKE_CASE ,pixel_values=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,output_attentions=__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : int = to_atuple(vision_model.config.image_size ) snake_case__ : Optional[Any] = to_atuple(vision_model.config.patch_size ) snake_case__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) snake_case__ : Dict = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) ) snake_case__ : List[Any] = output.text_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,) def __lowerCamelCase ( self :Tuple ,__lowercase :Optional[int] ,__lowercase :List[Any] ,__lowercase :Optional[int] ): pt_model.to(__SCREAMING_SNAKE_CASE ) pt_model.eval() # prepare inputs snake_case__ : Dict = inputs_dict snake_case__ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): snake_case__ : int = pt_model(**__SCREAMING_SNAKE_CASE ).to_tuple() snake_case__ : Any = fx_model(**__SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,len(__SCREAMING_SNAKE_CASE ) ,'''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ): self.assert_almost_equals(__SCREAMING_SNAKE_CASE ,pt_output.numpy() ,4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ,from_pt=__SCREAMING_SNAKE_CASE ) snake_case__ : Dict = fx_model_loaded(**__SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,len(__SCREAMING_SNAKE_CASE ) 
,'''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ): self.assert_almost_equals(__SCREAMING_SNAKE_CASE ,pt_output.numpy() ,4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = VisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ,from_flax=__SCREAMING_SNAKE_CASE ) pt_model_loaded.to(__SCREAMING_SNAKE_CASE ) pt_model_loaded.eval() with torch.no_grad(): snake_case__ : Tuple = pt_model_loaded(**__SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,len(__SCREAMING_SNAKE_CASE ) ,'''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ): self.assert_almost_equals(__SCREAMING_SNAKE_CASE ,pt_output_loaded.numpy() ,4e-2 ) def __lowerCamelCase ( self :List[str] ,__lowercase :int ,__lowercase :Optional[int] ,__lowercase :Tuple ): snake_case__ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) snake_case__ : int = VisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Any = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = fx_state self.check_pt_flax_equivalence(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :Optional[Any] ,__lowercase :Tuple ,__lowercase :Any ,__lowercase :List[str] ): snake_case__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) snake_case__ : int = VisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE ,fx_model.params ) self.check_pt_flax_equivalence(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :List[Any] ): snake_case__ : List[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :int ): snake_case__ : List[str] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :Union[str, Any] ): snake_case__ : Optional[int] = self.prepare_config_and_inputs() self.check_save_load(**__SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self :str ): snake_case__ : Tuple = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__SCREAMING_SNAKE_CASE ) @is_pt_flax_cross_test def __lowerCamelCase ( self :str ): snake_case__ : int = self.prepare_config_and_inputs() snake_case__ : Optional[Any] = config_inputs_dict.pop('''vision_config''' ) snake_case__ : str = config_inputs_dict.pop('''text_config''' ) snake_case__ : List[Any] = config_inputs_dict self.check_equivalence_pt_to_flax(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) self.check_equivalence_flax_to_pt(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self :Optional[int] ): snake_case__ , snake_case__ : List[Any] = self.get_pretrained_model_and_inputs() snake_case__ : Optional[int] = model_a(**__SCREAMING_SNAKE_CASE ) snake_case__ : 
Tuple = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__SCREAMING_SNAKE_CASE ) snake_case__ : int = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ) snake_case__ : str = model_a(**__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = after_outputs[0] snake_case__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE ,1e-5 ) @require_flax class a ( lowerCAmelCase_ , unittest.TestCase ): def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=__SCREAMING_SNAKE_CASE ,text_from_pt=__SCREAMING_SNAKE_CASE ,) snake_case__ : List[str] = 1_3 snake_case__ : Tuple = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) snake_case__ : List[Any] = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size ) snake_case__ : Optional[Any] = random_attention_mask([batch_size, 4] ) snake_case__ : Union[str, Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __lowerCamelCase ( self :Tuple ,__lowercase :int ,__lowercase :str ): snake_case__ : Tuple = FlaxViTModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Tuple = FlaxBertModel(__SCREAMING_SNAKE_CASE ) return vision_model, text_model def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : Tuple = FlaxViTModelTester(self ) snake_case__ : Tuple = FlaxBertModelTester(self ) snake_case__ : List[str] = vit_model_tester.prepare_config_and_inputs() snake_case__ : Tuple = bert_model_tester.prepare_config_and_inputs() snake_case__ , snake_case__ : List[str] = vision_config_and_inputs snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class a ( lowerCAmelCase_ , unittest.TestCase ): def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=__SCREAMING_SNAKE_CASE ,text_from_pt=__SCREAMING_SNAKE_CASE ,) snake_case__ : int = 1_3 snake_case__ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) snake_case__ : Optional[int] = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size ) snake_case__ : Dict = random_attention_mask([batch_size, 4] ) snake_case__ : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __lowerCamelCase ( self :str ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ): snake_case__ : Any = FlaxCLIPVisionModel(__SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = FlaxBertModel(__SCREAMING_SNAKE_CASE ) return vision_model, text_model def __lowerCamelCase ( self :Union[str, Any] ): snake_case__ : List[str] = FlaxCLIPVisionModelTester(self ) snake_case__ : Tuple = FlaxBertModelTester(self ) snake_case__ : str = 
clip_model_tester.prepare_config_and_inputs() snake_case__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() snake_case__ , snake_case__ : List[Any] = vision_config_and_inputs snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class a ( unittest.TestCase ): @slow def __lowerCamelCase ( self :Union[str, Any] ): snake_case__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 ) snake_case__ : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) snake_case__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) snake_case__ : Optional[Any] = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) snake_case__ : Tuple = model(**__SCREAMING_SNAKE_CASE ) # verify the logits self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,) snake_case__ : Union[str, Any] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
252
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _lowerCAmelCase ( lowercase ) -> Optional[Any]: # vision encoder if "img_encoder.pos_embed" in name: __lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: __lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: __lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: __lowerCAmelCase = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: __lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: __lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: __lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: __lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: __lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: __lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: __lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: __lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: __lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: __lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: __lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: __lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Dict: for key in orig_state_dict.copy().keys(): __lowerCAmelCase = orig_state_dict.pop(lowercase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] ) __lowerCAmelCase = config.vision_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[dim : dim * 2, :] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase = int(key_split[3] ) __lowerCAmelCase = config.text_config.hidden_size if "weight" in key: __lowerCAmelCase = val[:dim, :] __lowerCAmelCase = val[ dim : dim * 2, : ] __lowerCAmelCase = val[-dim:, :] else: __lowerCAmelCase = val[:dim] __lowerCAmelCase = val[dim : dim * 2] __lowerCAmelCase = val[-dim:] else: __lowerCAmelCase = rename_key(lowercase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): __lowerCAmelCase = val.squeeze_() else: __lowerCAmelCase = val return orig_state_dict def _lowerCAmelCase ( ) -> str: __lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]: __lowerCAmelCase = GroupViTConfig() __lowerCAmelCase = GroupViTModel(lowercase ).eval() __lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""] __lowerCAmelCase = convert_state_dict(lowercase , lowercase ) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0) # verify result __lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) __lowerCAmelCase = prepare_img() __lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" ) with torch.no_grad(): __lowerCAmelCase = model(**lowercase ) if model_name == "groupvit-gcc-yfcc": __lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": __lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f'Model name {model_name} not supported.' 
) assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 ) processor.save_pretrained(lowercase ) model.save_pretrained(lowercase ) print("""Successfully saved processor and model to""" , lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowercase , organization="""nielsr""" ) model.push_to_hub(lowercase , organization="""nielsr""" ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _a : List[str] = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
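# Example invocation of the GroupViT conversion script above (illustrative; the
# script file name and paths are placeholders, flags follow the argparse
# definitions in the code):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc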
689
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _a: Optional[Any] = 16 _a: Any = 32 def __lowerCAmelCase ( A , A = 16 ): UpperCAmelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCAmelCase_ = load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase_ = datasets.map( A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase_ = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase_ = 8 else: UpperCAmelCase_ = None return tokenizer.pad( A , padding="longest" , max_length=A , pad_to_multiple_of=A , return_tensors="pt" , ) # Instantiate dataloaders. 
UpperCAmelCase_ = DataLoader( tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A ) UpperCAmelCase_ = DataLoader( tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _a: int = mocked_dataloaders # noqa: F811 def __lowerCAmelCase ( A , A ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , A ) == "1": UpperCAmelCase_ = 2 # Initialize accelerator UpperCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ = config["lr"] UpperCAmelCase_ = int(config["num_epochs"] ) UpperCAmelCase_ = int(config["seed"] ) UpperCAmelCase_ = int(config["batch_size"] ) UpperCAmelCase_ = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation UpperCAmelCase_ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase_ = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase_ = MAX_GPU_BATCH_SIZE set_seed(A ) UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(A , A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase_ = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase_ = AdamW(params=model.parameters() , lr=A ) # Instantiate scheduler UpperCAmelCase_ = get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=100 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare( A , A , A , A , A ) # Now we train the model for epoch in range(A ): model.train() for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCAmelCase_ = model(**A ) UpperCAmelCase_ = outputs.loss UpperCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() UpperCAmelCase_ = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ = model(**A ) UpperCAmelCase_ = outputs.logits.argmax(dim=-1 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((predictions, batch["labels"]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(A ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples UpperCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=A , references=A , ) UpperCAmelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , A ) def __lowerCAmelCase ( ): UpperCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=A , default=A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(A , A ) if __name__ == "__main__": main()
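# Illustrative ways to launch the Accelerate training script above (the file
# name is a placeholder), following its own header comments and the accelerate
# examples README:
#   python multi_process_metrics.py              # single CPU or single GPU
#   accelerate launch multi_process_metrics.py   # distributed, after `accelerate config`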
162
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) _a : Optional[int] = ["""model.decoder.embed_positions.weights"""] def _lowerCAmelCase ( lowercase ) -> Optional[Any]: if "emb" in name: __lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: __lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: __lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: __lowerCAmelCase = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: __lowerCAmelCase = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: __lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: __lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: __lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: __lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: __lowerCAmelCase = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: __lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]: __lowerCAmelCase = list(state_dict.keys() ) __lowerCAmelCase = {} for key in keys: __lowerCAmelCase = state_dict.pop(lowercase ) __lowerCAmelCase = rename_keys(lowercase ) if "in_proj_weight" in key: # split fused qkv proj __lowerCAmelCase = val[:hidden_size, :] __lowerCAmelCase = val[hidden_size : 2 * hidden_size, :] __lowerCAmelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __lowerCAmelCase = val else: __lowerCAmelCase = val return state_dict, enc_dec_proj_state_dict def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values __lowerCAmelCase = 1024 __lowerCAmelCase = 24 __lowerCAmelCase = 16 elif checkpoint == "medium": __lowerCAmelCase = 1536 __lowerCAmelCase = 48 __lowerCAmelCase = 24 elif checkpoint == "large": __lowerCAmelCase = 2048 __lowerCAmelCase = 48 __lowerCAmelCase = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) __lowerCAmelCase = MusicgenDecoderConfig( hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , ) return config @torch.no_grad() def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]: __lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase ) __lowerCAmelCase = decoder_config_from_checkpoint(lowercase ) __lowerCAmelCase = fairseq_model.lm.state_dict() __lowerCAmelCase , __lowerCAmelCase = rename_state_dict( lowercase , hidden_size=decoder_config.hidden_size ) __lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" ) __lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) __lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase ) if len(lowercase ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(lowercase ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model __lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase ) # check we can do a forward pass __lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __lowerCAmelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor __lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" ) __lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) __lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase ) # set the appropriate bos/pad token ids __lowerCAmelCase = 2048 __lowerCAmelCase = 2048 # set other default generation config params __lowerCAmelCase = int(30 * audio_encoder.config.frame_rate ) __lowerCAmelCase = True __lowerCAmelCase = 3.0 if pytorch_dump_folder is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(lowercase ) processor.save_pretrained(lowercase ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(lowercase ) processor.push_to_hub(lowercase ) if __name__ == "__main__": _a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) _a : List[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
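# Example invocation of the MusicGen conversion script above (illustrative; the
# script file name and output folder are placeholders, flags follow the argparse
# definitions in the code):
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu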
689
0
'''simple docstring'''


def harmonic_series(n_term):
    '''simple docstring'''
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(harmonic_series(nth_term))
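# Worked example for harmonic_series above (illustrative, not in the original
# file): terms come back as strings, with the first term rendered as "1".
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []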
111
'''simple docstring'''

from collections import deque


def tarjan(g):
    '''simple docstring'''
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    '''simple docstring'''
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
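# Extra illustrative check for tarjan above (not in the original file): two
# disjoint 2-cycles come out as two components, each emitted in stack-pop order.
g2 = create_graph(4, [(0, 1), (1, 0), (2, 3), (3, 2)])
assert tarjan(g2) == [[1, 0], [3, 2]]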
689
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class SCREAMING_SNAKE_CASE__ : def __init__( self: Tuple , a: Dict , a: str=13 , a: Optional[int]=7 , a: Any=True , a: int=True , a: Dict=False , a: str=True , a: Optional[int]=99 , a: Any=32 , a: List[str]=5 , a: Dict=4 , a: List[str]=37 , a: Optional[int]="gelu" , a: int=0.1 , a: int=0.1 , a: str=5_12 , a: str=16 , a: List[str]=2 , a: Any=0.02 , a: str=3 , a: List[Any]=4 , a: Dict=None , ) ->List[str]: '''simple docstring''' a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_input_mask a_ = use_token_type_ids a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = type_sequence_label_size a_ = initializer_range a_ = num_labels a_ = num_choices a_ = scope def _lowerCAmelCase ( self: str) ->List[str]: '''simple docstring''' a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a_ = None if self.use_input_mask: a_ = random_attention_mask([self.batch_size, self.seq_length]) a_ = None if self.use_token_type_ids: a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a_ = None a_ = None a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a_ = ids_tensor([self.batch_size] , self.num_choices) a_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self: int) ->List[Any]: '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , use_stable_embedding=__SCREAMING_SNAKE_CASE , ) def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Union[str, Any] , a: List[str] , a: int , a: List[str] , a: Any , a: List[str]) ->Dict: '''simple docstring''' a_ = OpenLlamaModel(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() a_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE) a_ = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowerCAmelCase ( self: List[str] , a: int , a: Optional[Any] , a: List[Any] , a: int , a: str , a: Optional[int] , a: int , a: List[str] , a: Union[str, Any] , ) ->Dict: '''simple docstring''' a_ = True a_ = 
OpenLlamaModel(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() a_ = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) a_ = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ) a_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowerCAmelCase ( self: Union[str, Any] , a: Optional[int] , a: Dict , a: Union[str, Any] , a: Optional[int] , a: Optional[int] , a: Union[str, Any] , a: List[str] , a: List[str] , a: Dict , ) ->List[str]: '''simple docstring''' a_ = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() a_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _lowerCAmelCase ( self: List[str] , a: List[Any] , a: Dict , a: Optional[int] , a: Tuple , a: Union[str, Any] , a: Any , a: Optional[int] , a: Union[str, Any] , a: str , ) ->Optional[Any]: '''simple docstring''' a_ = True a_ = True a_ = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() # first forward pass a_ = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , ) a_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size) a_ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and a_ = torch.cat([input_ids, next_tokens] , dim=-1) a_ = torch.cat([input_mask, next_mask] , dim=-1) a_ = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )["hidden_states"][0] a_ = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )["hidden_states"][0] # select random slice a_ = ids_tensor((1,) , output_from_past.shape[-1]).item() a_ = output_from_no_past[:, -3:, random_slice_idx].detach() a_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3)) def _lowerCAmelCase ( self: List[Any]) ->str: '''simple docstring''' a_ = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) = config_and_inputs a_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _UpperCAmelCase =( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _UpperCAmelCase =(OpenLlamaForCausalLM,) if is_torch_available() 
else () _UpperCAmelCase =( { """feature-extraction""": OpenLlamaModel, """text-classification""": OpenLlamaForSequenceClassification, """text-generation""": OpenLlamaForCausalLM, """zero-shot""": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase =False _UpperCAmelCase =False def _lowerCAmelCase ( self: int) ->Dict: '''simple docstring''' a_ = OpenLlamaModelTester(self) a_ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37) def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _lowerCAmelCase ( self: Dict) ->List[Any]: '''simple docstring''' a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def _lowerCAmelCase ( self: Optional[int]) ->Dict: '''simple docstring''' a_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def _lowerCAmelCase ( self: Tuple) ->str: '''simple docstring''' a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = 3 a_ = input_dict["input_ids"] a_ = input_ids.ne(1).to(__SCREAMING_SNAKE_CASE) a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) a_ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() a_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _lowerCAmelCase ( self: str) ->Optional[Any]: '''simple docstring''' a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = 3 a_ = "single_label_classification" a_ = input_dict["input_ids"] a_ = input_ids.ne(1).to(__SCREAMING_SNAKE_CASE) a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) a_ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() a_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _lowerCAmelCase ( self: Optional[int]) ->List[str]: '''simple docstring''' a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = 3 a_ = "multi_label_classification" a_ = input_dict["input_ids"] a_ = input_ids.ne(1).to(__SCREAMING_SNAKE_CASE) a_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) a_ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() a_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test") def _lowerCAmelCase ( self: List[Any]) ->Optional[Any]: '''simple docstring''' pass @parameterized.expand([("linear",), ("dynamic",)]) def _lowerCAmelCase ( self: Optional[Any] , a: int) ->Dict: '''simple docstring''' a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = ids_tensor([1, 10] , config.vocab_size) a_ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , 
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        a_ = OpenLlamaModel(__SCREAMING_SNAKE_CASE)
        original_model.to(__SCREAMING_SNAKE_CASE)
        original_model.eval()
        a_ = original_model(__SCREAMING_SNAKE_CASE).last_hidden_state
        a_ = original_model(__SCREAMING_SNAKE_CASE).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        a_ = {"type": scaling_type, "factor": 10.0}
        a_ = OpenLlamaModel(__SCREAMING_SNAKE_CASE)
        scaled_model.to(__SCREAMING_SNAKE_CASE)
        scaled_model.eval()
        a_ = scaled_model(__SCREAMING_SNAKE_CASE).last_hidden_state
        a_ = scaled_model(__SCREAMING_SNAKE_CASE).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, atol=1e-5))
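For context, a minimal hypothetical sketch of how the rope_scaling dict exercised by the test above would be supplied in practice — assuming, as in the transformers Open-Llama port, that OpenLlamaConfig accepts a rope_scaling dict with a "type" of "linear" or "dynamic" and a float "factor":

# Hypothetical usage sketch (not part of the test file above).
# Assumes OpenLlamaConfig exposes a rope_scaling field, mirroring LlamaConfig.
from transformers import OpenLlamaConfig, OpenLlamaModel

config = OpenLlamaConfig(
    vocab_size=1000,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    max_position_embeddings=128,
    rope_scaling={"type": "linear", "factor": 10.0},  # same shape as the dict built in the test
)
model = OpenLlamaModel(config)
model.eval()  # inference mode, matching the test's scaled_model.eval() calls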
'''simple docstring'''
from argparse import ArgumentParser

from .env import EnvironmentCommand


def _lowerCAmelCase() -> Union[str, Any]:
    __lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    __lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""")

    # Register commands
    EnvironmentCommand.register_subcommand(lowercase)

    # Let's go
    __lowerCAmelCase = parser.parse_args()

    if not hasattr(lowercase, """func"""):
        parser.print_help()
        exit(1)

    # Run
    __lowerCAmelCase = args.func(lowercase)
    service.run()


if __name__ == "__main__":
    main()
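A hedged smoke-test sketch for the entry point above; in the actual diffusers source this module lives at diffusers/commands/diffusers_cli.py and the function is named main, so the import below assumes the de-obfuscated names:

# Hypothetical invocation sketch; assumes the original (non-obfuscated) module layout.
import sys
from diffusers.commands.diffusers_cli import main

sys.argv = ["diffusers-cli", "env"]  # same as running `diffusers-cli env` in a shell
main()  # the registered EnvironmentCommand prints platform and library version info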
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def __SCREAMING_SNAKE_CASE():
    """simple docstring"""
    lowercase_ : List[str] = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=_UpperCamelCase, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=_UpperCamelCase,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=_UpperCamelCase)
    return parser.parse_args()


def __SCREAMING_SNAKE_CASE():
    """simple docstring"""
    lowercase_ : List[Any] = parse_args()

    # Import training_script as a module.
    lowercase_ : Any = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    lowercase_ : Tuple = script_fpath.stem
    lowercase_ : str = importlib.import_module(_UpperCamelCase)

    # Patch sys.argv
    lowercase_ : Optional[int] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
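Note that the launcher imports the training script as a module and hands mod._mp_fn to xmp.spawn, so the launched script must expose an _mp_fn(index) entry point. A hedged invocation sketch (script names and arguments are placeholders):

# Hypothetical invocation of the TPU launcher above; equivalent to running it from a shell.
import subprocess

subprocess.run(
    [
        "python", "xla_spawn.py",    # this launcher script
        "--num_cores", "8",          # one process per TPU core
        "train_script.py",           # placeholder: must define _mp_fn(index)
        "--output_dir", "/tmp/out",  # trailing args are forwarded untouched to the training script
    ],
    check=True,
)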
'''simple docstring'''
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)

_a : int = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """label_embs_concat""": """label_embeddings_concat""",
    """mask_emb""": """masked_spec_embed""",
    """spk_proj""": """speaker_proj""",
}
_a : Any = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """label_embeddings_concat""",
    """speaker_proj""",
    """layer_norm_for_extract""",
]


def _lowerCAmelCase(lowercase, lowercase, lowercase, lowercase, lowercase) -> str:
    for attribute in key.split(""".""") :
        __lowerCAmelCase = getattr(lowercase, lowercase)

    if weight_type is not None:
        __lowerCAmelCase = getattr(lowercase, lowercase).shape
    else:
        __lowerCAmelCase = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )

    if weight_type == "weight":
        __lowerCAmelCase = value
    elif weight_type == "weight_g":
        __lowerCAmelCase = value
    elif weight_type == "weight_v":
        __lowerCAmelCase = value
    elif weight_type == "bias":
        __lowerCAmelCase = value
    else:
        __lowerCAmelCase = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def _lowerCAmelCase(lowercase, lowercase) -> List[Any]:
    __lowerCAmelCase = []
    __lowerCAmelCase = fairseq_model.state_dict()

    __lowerCAmelCase = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        __lowerCAmelCase = False
        if "conv_layers" in name:
            load_conv_layer(
                lowercase,
                lowercase,
                lowercase,
                lowercase,
                hf_model.config.feat_extract_norm == """group""",
            )
            __lowerCAmelCase = True
        else:
            for key, mapped_key in MAPPING.items():
                __lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(""".""")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    __lowerCAmelCase = True
                    if "*" in mapped_key:
                        __lowerCAmelCase = name.split(lowercase)[0].split(""".""")[-2]
                        __lowerCAmelCase = mapped_key.replace("""*""", lowercase)
                    if "weight_g" in name:
                        __lowerCAmelCase = """weight_g"""
                    elif "weight_v" in name:
                        __lowerCAmelCase = """weight_v"""
                    elif "bias" in name:
                        __lowerCAmelCase = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __lowerCAmelCase = """weight"""
                    else:
                        __lowerCAmelCase = None
                    set_recursively(lowercase, lowercase, lowercase, lowercase, lowercase)
                continue
        if not is_used:
            unused_weights.append(lowercase)

    logger.warning(f'Unused weights: {unused_weights}')


def _lowerCAmelCase(lowercase, lowercase, lowercase, lowercase, lowercase) -> Optional[Any]:
    __lowerCAmelCase = full_name.split("""conv_layers.""")[-1]
    __lowerCAmelCase = name.split(""".""")
    __lowerCAmelCase = int(items[0])
    __lowerCAmelCase = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            __lowerCAmelCase = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            __lowerCAmelCase = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.'
                )
            __lowerCAmelCase = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
                )
            __lowerCAmelCase = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(lowercase)


@torch.no_grad()
def _lowerCAmelCase(lowercase, lowercase, lowercase=None, lowercase=None, lowercase=True) -> Dict:
    if config_path is not None:
        __lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase)
    else:
        __lowerCAmelCase = UniSpeechSatConfig()

    __lowerCAmelCase = """"""

    if is_finetuned:
        __lowerCAmelCase = UniSpeechSatForCTC(lowercase)
    else:
        __lowerCAmelCase = UniSpeechSatForPreTraining(lowercase)

    __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])}
    )
    __lowerCAmelCase = model[0].eval()

    recursively_load_weights(lowercase, lowercase)

    hf_wavavec.save_pretrained(lowercase)


if __name__ == "__main__":
    _a : List[str] = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    _a : Union[str, Any] = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
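A hedged usage sketch for the conversion script above; the script file name and all paths are placeholders, and note that only the parent directory of --dict_path is actually handed to fairseq as its "data" override:

# Hypothetical invocation; replace the placeholder paths with real ones.
import subprocess

subprocess.run(
    [
        "python", "convert_unispeech_sat_checkpoint.py",    # placeholder script name
        "--checkpoint_path", "/path/to/unispeech_sat.pt",   # fairseq checkpoint
        "--dict_path", "/path/to/dict.ltr.txt",             # parent dir becomes fairseq's "data"
        "--pytorch_dump_folder_path", "/path/to/hf_model",  # output folder for save_pretrained
        "--not_finetuned",                                  # convert the pretraining head instead of the CTC head
    ],
    check=True,
)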