code                      string   (lengths 87 to 55.2k)
code_codestyle            int64    (0 to 349)
style_context             string   (lengths 135 to 49.1k)
style_context_codestyle   int64    (0 to 349)
label                     int64    (0 to 1)
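The columns above suggest each row pairs a `code` snippet with a `style_context` snippet, two integer style indices, and a binary `label`. As a minimal, hypothetical sketch of how such rows could be loaded and inspected with the `datasets` library (the file name `data.jsonl` and the local-file loading path are assumptions, not given by the source):

```python
# Minimal sketch: iterate rows with the columns described in the schema above.
# Assumptions (not from the source): the rows are stored locally as JSON Lines
# in "data.jsonl", with field names matching the schema header.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")

for row in ds.select(range(2)):
    print(len(row["code"]), row["code_codestyle"])                    # code string length, style index
    print(len(row["style_context"]), row["style_context_codestyle"])  # context string length, style index
    print(row["label"])                                               # binary label, 0 or 1
```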
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Any = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
48
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "unispeech" def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =hidden_size lowerCamelCase__: List[str] =feat_extract_norm lowerCamelCase__: Dict =feat_extract_activation lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_) lowerCamelCase__: Any =list(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_) lowerCamelCase__: Dict =conv_bias lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings lowerCamelCase__: Dict =num_conv_pos_embedding_groups lowerCamelCase__: int =len(self.conv_dim) lowerCamelCase__: Union[str, Any] =num_hidden_layers lowerCamelCase__: Union[str, Any] =intermediate_size lowerCamelCase__: Dict =hidden_act lowerCamelCase__: List[Any] =num_attention_heads lowerCamelCase__: Dict =hidden_dropout lowerCamelCase__: Optional[Any] =attention_dropout lowerCamelCase__: Optional[Any] =activation_dropout lowerCamelCase__: Tuple =feat_proj_dropout lowerCamelCase__: int =final_dropout lowerCamelCase__: Optional[Any] =layerdrop lowerCamelCase__: Dict =layer_norm_eps lowerCamelCase__: Optional[Any] =initializer_range lowerCamelCase__: int =num_ctc_classes lowerCamelCase__: Tuple =vocab_size lowerCamelCase__: Dict =do_stable_layer_norm lowerCamelCase__: List[Any] =use_weighted_layer_sum lowerCamelCase__: Dict 
=classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase__: int =apply_spec_augment lowerCamelCase__: List[str] =mask_time_prob lowerCamelCase__: Union[str, Any] =mask_time_length lowerCamelCase__: List[Any] =mask_time_min_masks lowerCamelCase__: Any =mask_feature_prob lowerCamelCase__: Optional[Any] =mask_feature_length lowerCamelCase__: List[str] =mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCamelCase__: Optional[Any] =num_codevectors_per_group lowerCamelCase__: str =num_codevector_groups lowerCamelCase__: Tuple =contrastive_logits_temperature lowerCamelCase__: int =feat_quantizer_dropout lowerCamelCase__: Any =num_negatives lowerCamelCase__: List[str] =codevector_dim lowerCamelCase__: Union[str, Any] =proj_codevector_dim lowerCamelCase__: Any =diversity_loss_weight # ctc loss lowerCamelCase__: Any =ctc_loss_reduction lowerCamelCase__: Dict =ctc_zero_infinity # pretraining loss lowerCamelCase__: Dict =replace_prob @property def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
10
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : str=3_0 , _lowerCAmelCase : List[str]=4_0_0 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=0.9 , _lowerCAmelCase : str=None , _lowerCAmelCase : int=True , _lowerCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , _lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" snake_case_ = size if size is not None else {"shortest_edge": 3_0} snake_case_ = crop_size if crop_size is not None else {"height": 3_0, "width": 3_0} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = do_resize_and_center_crop snake_case_ = size snake_case_ = crop_pct snake_case_ = crop_size snake_case_ = do_normalize snake_case_ = image_mean snake_case_ = image_std def lowerCAmelCase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = PoolFormerImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" snake_case_ = PoolFormerImageProcessingTester(self ) @property def lowerCAmelCase__ ( self : str ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "size" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "image_std" ) ) def lowerCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 3_0} ) self.assertEqual(image_processor.crop_size , {"height": 3_0, "width": 3_0} ) snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2} ) self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} ) def lowerCAmelCase__ ( self : int ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase__ ( self : Optional[Any] ) -> Any: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL 
images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
159
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float: """simple docstring""" lowerCamelCase__: str =a while True: lowerCamelCase__: Optional[Any] =Decimal(__a ) - ( Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__a ) ) < precision: # noqa: S307 return float(__a ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}') # Find Square Root of 5 print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}') # Exponential Roots print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
10
0
from queue import PriorityQueue from typing import Any import numpy as np def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> float | int: """simple docstring""" for nxt, d in graph[v]: if nxt in visited_forward: continue _lowercase =cst_fwd.get(__a , np.inf ) _lowercase =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _lowercase =new_cost_f _lowercase =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _lowercase =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case ) -> int: """simple docstring""" _lowercase =-1 _lowercase =set() _lowercase =set() _lowercase ={source: 0} _lowercase ={destination: 0} _lowercase ={source: None} _lowercase ={destination: None} _lowercase =PriorityQueue() _lowercase =PriorityQueue() _lowercase =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _lowercase =queue_forward.get() visited_forward.add(__a ) _lowercase =queue_backward.get() visited_backward.add(__a ) _lowercase =pass_and_relaxation( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) _lowercase =pass_and_relaxation( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _lowercase =shortest_distance return shortest_path_distance UpperCAmelCase__ = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } UpperCAmelCase__ = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
5
import itertools import math def lowerCAmelCase_ ( __a ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase_ ( ) -> str: """simple docstring""" lowerCamelCase__: Optional[int] =2 while True: if is_prime(__a ): yield num num += 1 def lowerCAmelCase_ ( __a = 10001 ) -> int: """simple docstring""" return next(itertools.islice(prime_generator() , nth - 1 , __a ) ) if __name__ == "__main__": print(f'{solution() = }')
10
0
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a : int = logging.getLogger(__name__) @dataclass class a : """simple docstring""" a : int = field( default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) a : Any = field( default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , ) a : Optional[int] = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) a : Optional[int] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) a : Dict = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) a : str = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) a : Any = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) a : Union[str, Any] = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) a : Optional[int] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'A csv or a json file containing the training data.'} ) a : Optional[Any] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'A csv or a json file containing the validation data.'} ) a : Tuple = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'A csv or a json file containing the test data.'} ) def UpperCAmelCase ( self : Any ) -> Optional[int]: if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __UpperCAmelCase : Tuple = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __UpperCAmelCase : Dict = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class a : """simple docstring""" a : List[Any] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) a : Union[str, Any] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) a : str = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) a : Any = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) a : str = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) a : Optional[Any] = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) a : Dict = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) def lowerCamelCase__ ( ): __UpperCAmelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCAmelCase : Any = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __UpperCAmelCase : Any = training_args.get_process_log_level() logger.setLevel(__a ) datasets.utils.logging.set_verbosity(__a ) transformers.utils.logging.set_verbosity(__a ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __UpperCAmelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCAmelCase : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCAmelCase : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __UpperCAmelCase : str = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __UpperCAmelCase : Any = data_args.train_file.split(""".""" )[-1] __UpperCAmelCase : Any = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __UpperCAmelCase : Tuple = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __UpperCAmelCase : int = load_dataset("""csv""" , data_files=__a , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __UpperCAmelCase : str = load_dataset("""json""" , data_files=__a , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __UpperCAmelCase : Union[str, Any] = raw_datasets["train"].features["label"].names __UpperCAmelCase : int = len(__a ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __UpperCAmelCase : Any = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__a , ) __UpperCAmelCase : Union[str, Any] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __UpperCAmelCase : Optional[Any] = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __UpperCAmelCase : str = False # Some models have set the order of the labels to use, so let's make sure we do use it. __UpperCAmelCase : List[Any] = {"Refused": 0, "Entailed": 1} __UpperCAmelCase : int = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __UpperCAmelCase : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__lowerCamelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(__lowerCamelCase : Dict ): __UpperCAmelCase : int = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __UpperCAmelCase : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __UpperCAmelCase : List[str] = examples["statement"] __UpperCAmelCase : Any = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __UpperCAmelCase : int = tokenizer(__a , __a , padding=__a , max_length=__a , truncation=__a ) __UpperCAmelCase : Tuple = examples["label"] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __UpperCAmelCase : Any = raw_datasets.map( __a , batched=__a , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __UpperCAmelCase : Optional[Any] = raw_datasets["train"] if data_args.max_train_samples is not None: __UpperCAmelCase : List[str] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __UpperCAmelCase : str = raw_datasets["validation"] if data_args.max_eval_samples is not None: __UpperCAmelCase : Dict = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise 
ValueError("""--do_predict requires a test dataset""" ) __UpperCAmelCase : Optional[int] = raw_datasets["test"] if data_args.max_predict_samples is not None: __UpperCAmelCase : str = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__a ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowerCamelCase : Optional[int] ): __UpperCAmelCase : List[str] = p.predictions[0] if isinstance(p.predictions , __a ) else p.predictions __UpperCAmelCase : Tuple = np.argmax(__a , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __UpperCAmelCase : Optional[int] = default_data_collator elif training_args.fpaa: __UpperCAmelCase : Optional[Any] = DataCollatorWithPadding(__a , pad_to_multiple_of=8 ) else: __UpperCAmelCase : List[Any] = None # Initialize our Trainer __UpperCAmelCase : Optional[int] = Trainer( model=__a , args=__a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__a , tokenizer=__a , data_collator=__a , ) # Training if training_args.do_train: __UpperCAmelCase : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __UpperCAmelCase : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCAmelCase : Union[str, Any] = last_checkpoint __UpperCAmelCase : Optional[int] = trainer.train(resume_from_checkpoint=__a ) __UpperCAmelCase : int = train_result.metrics __UpperCAmelCase : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__a ) ) __UpperCAmelCase : List[str] = min(__a , len(__a ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __a ) trainer.save_metrics("""train""" , __a ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __UpperCAmelCase : Tuple = trainer.evaluate(eval_dataset=__a ) __UpperCAmelCase : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__a ) __UpperCAmelCase : Union[str, Any] = min(__a , len(__a ) ) trainer.log_metrics("""eval""" , __a ) trainer.save_metrics("""eval""" , __a ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__UpperCAmelCase : Dict = predict_dataset.remove_columns("""label""" ) __UpperCAmelCase : int = trainer.predict(__a , metric_key_prefix="""predict""" ).predictions __UpperCAmelCase : List[str] = np.argmax(__a , axis=1 ) __UpperCAmelCase : List[str] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__a , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__a ): __UpperCAmelCase : Dict = label_list[item] writer.write(f"""{index}\t{item}\n""" ) __UpperCAmelCase : List[str] = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**__a ) else: trainer.create_model_card(**__a ) def lowerCamelCase__ ( __lowerCamelCase : Tuple ): main() if __name__ == "__main__": main()
114
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str: '''simple docstring''' lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30} lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30} lowerCamelCase__: Any =parent lowerCamelCase__: Any =batch_size lowerCamelCase__: Optional[Any] =num_channels lowerCamelCase__: Tuple =min_resolution lowerCamelCase__: Union[str, Any] =max_resolution lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop lowerCamelCase__: Optional[int] =size lowerCamelCase__: str =crop_pct lowerCamelCase__: Any =crop_size lowerCamelCase__: List[str] =do_normalize lowerCamelCase__: List[str] =image_mean lowerCamelCase__: Tuple =image_std def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]: '''simple docstring''' return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = PoolFormerImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE_ (self : str) ->int: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop")) self.assertTrue(hasattr(UpperCAmelCase_ , "size")) self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct")) self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCAmelCase_ , "image_std")) def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]: '''simple docstring''' lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"shortest_edge": 30}) self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30}) lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84}) def 
SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict) # create random PIL images lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image) # Test not batched input lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict: '''simple docstring''' lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray) # Test not batched input lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any: '''simple docstring''' lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) # Test not batched input lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
10
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase ): lowerCamelCase_ : Optional[int] = StableDiffusionXLImgaImgPipeline lowerCamelCase_ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} lowerCamelCase_ : str = PipelineTesterMixin.required_optional_params - {'''latents'''} lowerCamelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase_ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCamelCase_ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase (self ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase_ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) snake_case_ : Tuple = EulerDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) snake_case_ : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) snake_case_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) snake_case_ : Optional[Any] = CLIPTextModel(UpperCAmelCase_ ) snake_case_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase_ ) snake_case_ : Union[str, Any] = CLIPTextModelWithProjection(UpperCAmelCase_ ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase_ ) snake_case_ : int = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_a, "tokenizer_2": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def lowerCamelCase (self , __magic_name__ , __magic_name__=0 ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) snake_case_ : Any = image / 2 + 0.5 if str(UpperCAmelCase_ 
).startswith('''mps''' ): snake_case_ : str = torch.manual_seed(UpperCAmelCase_ ) else: snake_case_ : List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) snake_case_ : Any = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "numpy", "strength": 0.75, } return inputs def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case_ : List[str] = self.get_dummy_components() snake_case_ : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase_ ) snake_case_ : Dict = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ : Dict = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ : Union[str, Any] = sd_pipe(**UpperCAmelCase_ ).images snake_case_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : List[Any] = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase (self ) -> List[str]: '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase_ ) snake_case_ : str = sd_pipe.to(UpperCAmelCase_ ) snake_case_ : List[Any] = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) # forward without prompt embeds snake_case_ : int = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ : List[Any] = 3 * ["this is a negative prompt"] snake_case_ : Tuple = negative_prompt snake_case_ : int = 3 * [inputs["prompt"]] snake_case_ : Tuple = sd_pipe(**UpperCAmelCase_ ) snake_case_ : Tuple = output.images[0, -3:, -3:, -1] # forward with prompt embeds snake_case_ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ ) snake_case_ : Dict = 3 * ["this is a negative prompt"] snake_case_ : Any = 3 * [inputs.pop('''prompt''' )] ( snake_case_ ) : Tuple = sd_pipe.encode_prompt(UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ ) snake_case_ : int = sd_pipe( **UpperCAmelCase_ , prompt_embeds=UpperCAmelCase_ , negative_prompt_embeds=UpperCAmelCase_ , pooled_prompt_embeds=UpperCAmelCase_ , negative_pooled_prompt_embeds=UpperCAmelCase_ , ) snake_case_ : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase (self , __magic_name__ , __magic_name__="cpu" , __magic_name__=torch.floataa , __magic_name__=0 ) -> Tuple: '''simple docstring''' snake_case_ : Optional[int] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) snake_case_ : Union[str, Any] = np.random.RandomState(UpperCAmelCase_ ).standard_normal((1, 4, 64, 64) ) snake_case_ : Union[str, Any] = torch.from_numpy(UpperCAmelCase_ 
).to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ ) snake_case_ : Any = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) snake_case_ : int = self.get_inputs(UpperCAmelCase_ ) snake_case_ : str = pipe(**UpperCAmelCase_ ).images snake_case_ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case_ : int = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
279
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json" ), }, } __A = { "yjernite/retribert-base-uncased": 512, } __A = { "yjernite/retribert-base-uncased": {"do_lower_case": True}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = RetriBertTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: int =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: List[str] =tokenize_chinese_chars lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: Any =do_lower_case def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=None) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: Tuple =[self.sep_token_id] lowerCamelCase__: Optional[int] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: Tuple =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
10
0
'''simple docstring''' class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : Any = n UpperCAmelCase : Tuple = [None] * self.n UpperCAmelCase : str = 0 # index of the first element UpperCAmelCase : Tuple = 0 UpperCAmelCase : Optional[Any] = 0 def __len__( self ): '''simple docstring''' return self.size def A_ ( self ): '''simple docstring''' return self.size == 0 def A_ ( self ): '''simple docstring''' return False if self.is_empty() else self.array[self.front] def A_ ( self , snake_case ): '''simple docstring''' if self.size >= self.n: raise Exception("QUEUE IS FULL" ) UpperCAmelCase : List[Any] = data UpperCAmelCase : Dict = (self.rear + 1) % self.n self.size += 1 return self def A_ ( self ): '''simple docstring''' if self.size == 0: raise Exception("UNDERFLOW" ) UpperCAmelCase : Optional[Any] = self.array[self.front] UpperCAmelCase : Optional[int] = None UpperCAmelCase : Dict = (self.front + 1) % self.n self.size -= 1 return temp
311
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Any: """simple docstring""" if attention_mask is None: lowerCamelCase__: Optional[Any] =np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCamelCase__: Dict =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCamelCase__: Optional[Any] =np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__: Any =np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__: List[str] =np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Any=0.02 , ) ->Optional[int]: '''simple docstring''' lowerCamelCase__: int =parent lowerCamelCase__: List[str] =batch_size lowerCamelCase__: Optional[int] =seq_length lowerCamelCase__: Optional[Any] =is_training lowerCamelCase__: str =use_labels lowerCamelCase__: Optional[Any] =vocab_size lowerCamelCase__: int =hidden_size lowerCamelCase__: Dict =num_hidden_layers lowerCamelCase__: Any =num_attention_heads lowerCamelCase__: str =intermediate_size lowerCamelCase__: int =hidden_act lowerCamelCase__: Tuple =hidden_dropout_prob lowerCamelCase__: List[str] =attention_probs_dropout_prob lowerCamelCase__: Optional[int] =max_position_embeddings lowerCamelCase__: int =eos_token_id lowerCamelCase__: Union[str, Any] =pad_token_id lowerCamelCase__: List[str] =bos_token_id lowerCamelCase__: int =initializer_range def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size) lowerCamelCase__: str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1) lowerCamelCase__: int 
=shift_tokens_right(UpperCAmelCase_ , 1 , 2) lowerCamelCase__: Dict =BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , ) lowerCamelCase__: Any =prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) return config, inputs_dict def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: Dict =self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Optional[Any] =20 lowerCamelCase__: Optional[int] =model_class_name(UpperCAmelCase_) lowerCamelCase__: str =model.encode(inputs_dict["input_ids"]) lowerCamelCase__ , lowerCamelCase__: List[Any] =( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4") lowerCamelCase__: Tuple =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase__: Union[str, Any] =model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4") lowerCamelCase__: Dict =model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: List[Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[str] =20 lowerCamelCase__: Optional[Any] =model_class_name(UpperCAmelCase_) lowerCamelCase__: Any =model.encode(inputs_dict["input_ids"]) lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowerCamelCase__: Optional[int] =jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] 
, (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase__: List[Any] =model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4") lowerCamelCase__: str =model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_) lowerCamelCase__: str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowercase_ = 99 def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' lowerCamelCase__: Union[str, Any] =np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowerCamelCase__: Optional[Any] =input_ids.shape[0] lowerCamelCase__: List[str] =BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data() lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_) lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_) lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->str: '''simple docstring''' lowerCamelCase__: Optional[int] =BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_) lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa) lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa) lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_) lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa) lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2) lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 
1).astype(np.floataa).sum() lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum() self.assertEqual(shifted.shape , input_ids.shape) self.assertEqual(UpperCAmelCase_ , n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0] , 2).all()) @require_flax class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = True lowercase_ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__: List[Any] =FlaxBlenderbotModelTester(self) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_) @jax.jit def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]): return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_) with self.subTest("JIT Enabled"): lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple() self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_)) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_): self.assertEqual(jitted_output.shape , output.shape) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_) lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"]) lowerCamelCase__: int ={ "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]): return model.decode( decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , ) with self.subTest("JIT Enabled"): lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple() 
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_)) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_): self.assertEqual(jitted_output.shape , output.shape) @slow def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]: '''simple docstring''' for model_class_name in self.all_model_classes: lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id lowerCamelCase__: str =model(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.") @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict: '''simple docstring''' lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True} lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_) lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") lowerCamelCase__: Any =["Sam"] lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax") lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic." lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_) assert generated_txt[0].strip() == tgt_text
10
0
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class __UpperCAmelCase : def __init__( self : Optional[Any], __A : Dict, __A : Tuple=1_3, __A : int=7, __A : Dict=False, __A : Dict=True, __A : Optional[int]=False, __A : List[Any]=False, __A : Union[str, Any]=1_9, __A : Dict=3_2, __A : Optional[Any]=5, __A : Any=4, __A : int=3_7, __A : Union[str, Any]="gelu", __A : List[Any]=0.1, __A : Tuple=0.1, __A : Any=5_1_2, __A : Tuple=1_6, __A : Optional[Any]=2, __A : Optional[Any]=0.0_2, __A : str=3, __A : Optional[int]=4, __A : Tuple=None, ): UpperCAmelCase : Tuple = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : List[str] = is_training UpperCAmelCase : Optional[int] = use_input_mask UpperCAmelCase : int = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : Any = vocab_size UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : int = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : str = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Any = num_labels UpperCAmelCase : int = num_choices UpperCAmelCase : Tuple = scope def __magic_name__ ( self : List[str] ): UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : Optional[Any] = None if self.use_input_mask: UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) UpperCAmelCase : List[str] = ids_tensor([self.batch_size], self.num_choices ) UpperCAmelCase : Optional[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Dict = EsmConfig( vocab_size=3_3, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=UpperCAmelCase_, esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False}, ) return config def __magic_name__ ( self : Optional[Any], __A : int, __A : List[str], __A : str, __A : List[str], __A : List[str], __A : str ): UpperCAmelCase : str = 
EsmForProteinFolding(config=UpperCAmelCase_ ).float() model.to(UpperCAmelCase_ ) model.eval() UpperCAmelCase : str = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_ ) UpperCAmelCase : Optional[int] = model(UpperCAmelCase_ ) UpperCAmelCase : Any = model(UpperCAmelCase_ ) self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 1_4, 3) ) self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2) ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( UpperCAmelCase ) : Optional[Any] = config_and_inputs UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): UpperCamelCase = False UpperCamelCase = (EsmForProteinFolding,) if is_torch_available() else () UpperCamelCase = () UpperCamelCase = {} if is_torch_available() else {} UpperCamelCase = False def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Optional[Any] = EsmFoldModelTester(self ) UpperCAmelCase : str = ConfigTester(self, config_class=UpperCAmelCase_, hidden_size=3_7 ) def __magic_name__ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def __magic_name__ ( self : List[str] ): UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) @unittest.skip('''Does not support attention outputs''' ) def __magic_name__ ( self : List[Any] ): pass @unittest.skip def __magic_name__ ( self : int ): pass @unittest.skip('''Esm does not support embedding resizing''' ) def __magic_name__ ( self : str ): pass @unittest.skip('''Esm does not support embedding resizing''' ) def __magic_name__ ( self : Optional[int] ): pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def __magic_name__ ( self : Dict ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def __magic_name__ ( self : Optional[Any] ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def __magic_name__ ( self : str ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def __magic_name__ ( self : Any ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def __magic_name__ ( self : Optional[Any] ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def __magic_name__ ( self : Union[str, Any] ): pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def __magic_name__ ( self : List[str] ): pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def __magic_name__ ( self : List[str] ): pass @unittest.skip('''ESMFold only has one output format.''' ) def __magic_name__ ( self : Dict ): pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def __magic_name__ ( self : List[str] ): pass @unittest.skip('''ESMFold does not support input chunking.''' ) def __magic_name__ ( self : Dict ): pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def __magic_name__ ( self : int ): pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def __magic_name__ ( self : str ): pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def __magic_name__ ( self : Any ): pass @unittest.skip('''ESMFold doesn\'t support torchscript 
compilation.''' ) def __magic_name__ ( self : List[Any] ): pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def __magic_name__ ( self : Dict ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __magic_name__ ( self : Union[str, Any] ): pass @require_torch class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): @slow def __magic_name__ ( self : Any ): UpperCAmelCase : Optional[Any] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() UpperCAmelCase : Any = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) UpperCAmelCase : int = model(UpperCAmelCase_ )["positions"] UpperCAmelCase : Tuple = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4], dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], UpperCAmelCase_, atol=1E-4 ) )
336
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = "▁" __A = {"vocab_file": "prophetnet.tokenizer"} __A = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } __A = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } __A = { "microsoft/xprophetnet-large-wiki100-cased": 512, } def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" lowerCamelCase__: Optional[Any] =collections.OrderedDict() with open(__a , "r" , encoding="utf-8" ) as reader: lowerCamelCase__: int =reader.readlines() for index, token in enumerate(__a ): lowerCamelCase__: List[str] =token.rstrip("\n" ) lowerCamelCase__: List[Any] =index return vocab class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ["input_ids", "attention_mask"] def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None: '''simple docstring''' lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece") raise lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(UpperCAmelCase_)) lowerCamelCase__: Optional[int] =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10): lowerCamelCase__: Optional[int] =F"""[unused{i}]""" lowerCamelCase__: int =5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowerCamelCase__: int =12 lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(UpperCAmelCase_) def __getstate__(self : List[str]) ->Dict: '''simple docstring''' lowerCamelCase__: Optional[int] =self.__dict__.copy() lowerCamelCase__: Dict =None return state def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' lowerCamelCase__: Tuple =d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece") raise # for backward compatibility if not hasattr(self , "sp_model_kwargs"): lowerCamelCase__: Dict ={} lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return ([0] * len(UpperCAmelCase_)) + [1] return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1] def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: Any =[self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def SCREAMING_SNAKE_CASE_ (self : str) ->Dict: '''simple docstring''' return len(self.sp_model) + self.fairseq_offset def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple: '''simple docstring''' lowerCamelCase__: str ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip() return out_string def 
SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase_): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return lowerCamelCase__: List[str] =os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCAmelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCAmelCase_ , "wb") as fi: lowerCamelCase__: Dict =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_) return (out_vocab_file,) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowerCamelCase__: Union[str, Any] =[self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
10
0
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = emb.weight.shape SCREAMING_SNAKE_CASE : Tuple = nn.Linear(__a , __a , bias=__a ) SCREAMING_SNAKE_CASE : int = emb.weight.data return lin_layer def __A ( lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = {} for old_key in state_dict.keys(): SCREAMING_SNAKE_CASE : List[Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: SCREAMING_SNAKE_CASE : int = key.replace("""moe_layer.experts.0""" , f'''ffn.experts.expert_{expert_idx}''' ) else: SCREAMING_SNAKE_CASE : Optional[Any] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: SCREAMING_SNAKE_CASE : int = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: SCREAMING_SNAKE_CASE : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: SCREAMING_SNAKE_CASE : Tuple = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: SCREAMING_SNAKE_CASE : int = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: SCREAMING_SNAKE_CASE : Optional[int] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: SCREAMING_SNAKE_CASE : List[str] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) SCREAMING_SNAKE_CASE : List[str] = state_dict[old_key] return new_dict def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = WEIGHTS_NAME ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = 0 os.makedirs(__a , exist_ok=__a ) for expert in range(__a ): SCREAMING_SNAKE_CASE : Any = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(__a ): SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(__a )["model"] remove_ignore_keys_(__a ) SCREAMING_SNAKE_CASE : int = rename_fairseq_keys(__a , __a ) SCREAMING_SNAKE_CASE : Any = os.path.join( __a , weights_name.replace(""".bin""" , f'''-{len(__a )+1:05d}-of-???.bin''' ) ) torch.save(__a , __a ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__a )[0]].dtype ) # Add the last block SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(__a , weights_name.replace(""".bin""" , f'''-{len(__a )+1:05d}-of-???.bin''' ) ) SCREAMING_SNAKE_CASE : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["model"] remove_ignore_keys_(__a ) SCREAMING_SNAKE_CASE : Any = rename_fairseq_keys(__a , __a ) SCREAMING_SNAKE_CASE : Optional[Any] = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__a ) == 1: 
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(__a , __a ) torch.save(__a , __a ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__a , __a ) # Otherwise, let's build the index SCREAMING_SNAKE_CASE : Dict = {} for idx, shard in enumerate(__a ): SCREAMING_SNAKE_CASE : str = weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-{len(__a ):05d}.bin''' ) SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(__a , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__a , os.path.join(__a , __a ) ) for key in shard: SCREAMING_SNAKE_CASE : List[str] = shard_file # Add the metadata SCREAMING_SNAKE_CASE : List[str] = {"total_size": total_size} SCREAMING_SNAKE_CASE : List[Any] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__a , __a ) , """w""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE : str = json.dumps(__a , indent=2 , sort_keys=__a ) + "\n" f.write(__a ) return metadata, index if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase , __UpperCAmelCase = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) __UpperCAmelCase = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) __UpperCAmelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
323
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
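# Usage sketch (illustrative only, assumes the published `transformers` package): the
# _LazyModule pattern above keeps `import transformers` cheap; torch-backed classes such as
# IBertModel are only materialized when first accessed from the package namespace.
from transformers import IBertConfig, IBertModel

config = IBertConfig()       # config classes are lightweight and import immediately
model = IBertModel(config)   # the torch-backed module is loaded lazily at this point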
10
0
from random import randint, random def lowerCAmelCase__( lowercase : Tuple , lowercase : Dict , lowercase : Union[str, Any] , lowercase : Tuple = False , lowercase : List[Any] = False , lowercase : Tuple = 5 , ) -> list: __snake_case : Tuple = [[-1] * number_of_cells] # Create a highway without any car __snake_case : Dict = 0 __snake_case : Any = max(__a , 0 ) while i < number_of_cells: __snake_case : str = ( randint(0 , __a ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : Any ) -> int: __snake_case : Optional[int] = 0 __snake_case : Optional[int] = highway_now[car_index + 1 :] for cell in range(len(__a ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(__a , -1 ) def lowerCAmelCase__( lowercase : int , lowercase : Tuple , lowercase : Optional[Any] ) -> list: __snake_case : Optional[int] = len(__a ) # Beforce calculations, the highway is empty __snake_case : Dict = [-1] * number_of_cells for car_index in range(__a ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed __snake_case : int = min(highway_now[car_index] + 1 , __a ) # Number of empty cell before the next car __snake_case : Union[str, Any] = get_distance(__a , __a ) - 1 # We can't have the car causing an accident __snake_case : Dict = min(next_highway[car_index] , __a ) if random() < probability: # Randomly, a driver will slow down __snake_case : int = max(next_highway[car_index] - 1 , 0 ) return next_highway def lowerCAmelCase__( lowercase : Tuple , lowercase : Dict , lowercase : Dict , lowercase : Optional[int] ) -> list: __snake_case : Dict = len(highway[0] ) for i in range(__a ): __snake_case : Dict = update(highway[i] , __a , __a ) __snake_case : List[str] = [-1] * number_of_cells for car_index in range(__a ): __snake_case : Any = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) __snake_case : Dict = (car_index + speed) % number_of_cells # Commit the change of position __snake_case : Optional[Any] = speed highway.append(__a ) return highway if __name__ == "__main__": import doctest doctest.testmod()
326
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
0
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __lowerCamelCase = [ {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''}, {'''dataset''': '''snli''', '''config_name''': '''plain_text'''}, {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''}, {'''dataset''': '''wiki40b''', '''config_name''': '''en'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''}, {'''dataset''': '''natural_questions''', '''config_name''': '''default'''}, ] def UpperCAmelCase__ ( UpperCAmelCase__=True ) -> int: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__SCREAMING_SNAKE_CASE ) ) class A__ ( __SCREAMING_SNAKE_CASE ): lowercase = None lowercase = None def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' with TemporaryDirectory() as tmp_dir: A_ = dataset_module_factory(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ) A_ = import_main_class(dataset_module.module_path , dataset=UpperCAmelCase_ ) A_ = builder_cls( cache_dir=UpperCAmelCase_ , config_name=UpperCAmelCase_ , hash=dataset_module.hash , ) A_ = "/".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=UpperCAmelCase_ ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) A_ = cached_path(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ) self.assertTrue(os.path.exists(UpperCAmelCase_ ) ) @pytest.mark.integration def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict: A_ = tmp_path_factory.mktemp("""test_hf_gcp""" ) / "test_wikipedia_simple" A_ = dataset_module_factory("""wikipedia""", cache_dir=__a ) A_ = import_main_class(dataset_module.module_path ) A_ = builder_cls( cache_dir=__a, config_name="""20220301.frr""", hash=dataset_module.hash, ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam A_ = None builder_instance.download_and_prepare() A_ = builder_instance.as_dataset() assert ds @pytest.mark.integration def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[str]: A_ = dataset_module_factory("""wikipedia""", cache_dir=__a ) A_ = import_main_class(dataset_module.module_path, dataset=__a ) A_ = builder_cls( cache_dir=__a, config_name="""20220301.frr""", hash=dataset_module.hash, ) A_ = 
builder_instance.as_streaming_dataset() assert ds assert isinstance(__a, __a ) assert "train" in ds assert isinstance(ds["""train"""], __a ) assert next(iter(ds["""train"""] ) )
162
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class _SCREAMING_SNAKE_CASE(__SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    lowercase_ = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    lowercase_ = Features({"image": Image()})
    lowercase_ = Features({"labels": ClassLabel})
    lowercase_ = "image"
    lowercase_ = "labels"

    def SCREAMING_SNAKE_CASE_(self: Tuple, UpperCAmelCase_: Union[str, Any]) -> Tuple:
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], UpperCAmelCase_):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
        lowerCamelCase__: List[Any] = copy.deepcopy(self)
        lowerCamelCase__: Optional[int] = self.label_schema.copy()
        lowerCamelCase__: int = features[self.label_column]
        lowerCamelCase__: int = label_schema
        return task_template

    @property
    def SCREAMING_SNAKE_CASE_(self: Dict) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
10
0
from __future__ import annotations lowerCamelCase : int = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Optional[Any] , lowercase : str , lowercase : Optional[Any] , lowercase : Any , ): '''simple docstring''' lowerCamelCase_ = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__a ) ) ] # the reference grid lowerCamelCase_ = 1 lowerCamelCase_ = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__a ) ) ] # the action grid lowerCamelCase_ = init[0] lowerCamelCase_ = init[1] lowerCamelCase_ = 0 lowerCamelCase_ = g + heuristic[x][y] # cost from starting cell to destination cell lowerCamelCase_ = [[f, g, x, y]] lowerCamelCase_ = False # flag that is set when search is complete lowerCamelCase_ = False # flag set if we can't find expand while not found and not resign: if len(__a ) == 0: raise ValueError('Algorithm is unable to find solution' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() lowerCamelCase_ = cell.pop() lowerCamelCase_ = next_cell[2] lowerCamelCase_ = next_cell[3] lowerCamelCase_ = next_cell[1] if x == goal[0] and y == goal[1]: lowerCamelCase_ = True else: for i in range(len(__a ) ): # to try out different valid actions lowerCamelCase_ = x + DIRECTIONS[i][0] lowerCamelCase_ = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__a ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: lowerCamelCase_ = g + cost lowerCamelCase_ = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) lowerCamelCase_ = 1 lowerCamelCase_ = i lowerCamelCase_ = [] lowerCamelCase_ = goal[0] lowerCamelCase_ = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: lowerCamelCase_ = x - DIRECTIONS[action[x][y]][0] lowerCamelCase_ = y - DIRECTIONS[action[x][y]][1] lowerCamelCase_ = xa lowerCamelCase_ = ya invpath.append([x, y] ) lowerCamelCase_ = [] for i in range(len(__a ) ): path.append(invpath[len(__a ) - 1 - i] ) return path, action if __name__ == "__main__": lowerCamelCase : List[str] = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] lowerCamelCase : Tuple = [0, 0] # all coordinates are given in format [y,x] lowerCamelCase : Optional[int] = [len(grid) - 1, len(grid[0]) - 1] lowerCamelCase : Tuple = 1 # the cost map which pushes the path closer to the goal lowerCamelCase : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): lowerCamelCase : str = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map lowerCamelCase : Optional[int] = 99 lowerCamelCase , lowerCamelCase : str = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
204
import logging

from transformers.configuration_utils import PretrainedConfig


__A = logging.getLogger(__name__)


class _SCREAMING_SNAKE_CASE(__SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    lowercase_ = "masked_bert"

    def __init__(
        self: Dict,
        UpperCAmelCase_: Any = 30_522,
        UpperCAmelCase_: List[Any] = 768,
        UpperCAmelCase_: Optional[Any] = 12,
        UpperCAmelCase_: str = 12,
        UpperCAmelCase_: Tuple = 3_072,
        UpperCAmelCase_: str = "gelu",
        UpperCAmelCase_: Any = 0.1,
        UpperCAmelCase_: Tuple = 0.1,
        UpperCAmelCase_: Optional[Any] = 512,
        UpperCAmelCase_: Union[str, Any] = 2,
        UpperCAmelCase_: str = 0.02,
        UpperCAmelCase_: str = 1E-1_2,
        UpperCAmelCase_: Union[str, Any] = 0,
        UpperCAmelCase_: str = "topK",
        UpperCAmelCase_: List[str] = "constant",
        UpperCAmelCase_: str = 0.0,
        **UpperCAmelCase_: int,
    ) -> List[Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=UpperCAmelCase_, **UpperCAmelCase_)
        lowerCamelCase__: Optional[int] = vocab_size
        lowerCamelCase__: Dict = hidden_size
        lowerCamelCase__: Optional[int] = num_hidden_layers
        lowerCamelCase__: Any = num_attention_heads
        lowerCamelCase__: List[Any] = hidden_act
        lowerCamelCase__: str = intermediate_size
        lowerCamelCase__: Dict = hidden_dropout_prob
        lowerCamelCase__: str = attention_probs_dropout_prob
        lowerCamelCase__: int = max_position_embeddings
        lowerCamelCase__: Tuple = type_vocab_size
        lowerCamelCase__: str = initializer_range
        lowerCamelCase__: List[Any] = layer_norm_eps
        lowerCamelCase__: str = pruning_method
        lowerCamelCase__: Union[str, Any] = mask_init
        lowerCamelCase__: Optional[Any] = mask_scale
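# Usage sketch (hedged): in the upstream movement-pruning research example this configuration
# class is exposed as MaskedBertConfig, and the extra arguments control how weights are pruned.
# The class name and import path below are assumptions for illustration, not defined above.
from emmental import MaskedBertConfig

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.pruning_method)  # "topK": keep the largest-scoring weights per layer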
10
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class UpperCamelCase__(unittest.TestCase):
    '''simple docstring'''

    def _lowercase(self) -> Tuple:
        lowerCamelCase : str = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        lowerCamelCase : Dict = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase_), torch_builtin(UpperCAmelCase_)))
        self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase_), gelu_new(UpperCAmelCase_)))

    def _lowercase(self) -> str:
        lowerCamelCase : List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        lowerCamelCase : str = get_activation("gelu")
        lowerCamelCase : Union[str, Any] = get_activation("gelu_10")
        lowerCamelCase : Dict = torch_builtin(UpperCAmelCase_)
        lowerCamelCase : Any = geluaa(UpperCAmelCase_)
        lowerCamelCase : Union[str, Any] = torch.where(y_gelu_aa < 10.0, 1, 0)
        self.assertTrue(torch.max(UpperCAmelCase_).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))

    def _lowercase(self) -> Union[str, Any]:
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(UpperCAmelCase_):
            get_activation("bogus")
        with self.assertRaises(UpperCAmelCase_):
            get_activation(UpperCAmelCase_)

    def _lowercase(self) -> Tuple:
        lowerCamelCase : Dict = get_activation("gelu")
        lowerCamelCase : str = 1
        lowerCamelCase : Union[str, Any] = get_activation("gelu")
        self.assertEqual(acta.a, 1)
        with self.assertRaises(UpperCAmelCase_):
            lowerCamelCase : Union[str, Any] = acta.a
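# Usage sketch (hedged): get_activation from transformers.activations maps a string name to an
# instantiated torch activation module, which is what the tests above exercise. Assumes torch
# and transformers are installed; the printed values are illustrative.
import torch
from transformers.activations import get_activation

act = get_activation("gelu")
x = torch.tensor([-1.0, 0.0, 1.0])
print(act(x))  # tensor of GELU-transformed values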
48
class _SCREAMING_SNAKE_CASE:
    '''simple docstring'''

    def __init__(self: Optional[Any], UpperCAmelCase_: int) -> Optional[int]:
        '''simple docstring'''
        lowerCamelCase__: Any = n
        lowerCamelCase__: Tuple = [None] * self.n
        lowerCamelCase__: str = 0  # index of the first element
        lowerCamelCase__: Tuple = 0
        lowerCamelCase__: Optional[Any] = 0

    def __len__(self: str) -> int:
        '''simple docstring'''
        return self.size

    def SCREAMING_SNAKE_CASE_(self: int) -> bool:
        '''simple docstring'''
        return self.size == 0

    def SCREAMING_SNAKE_CASE_(self: List[str]) -> str:
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]

    def SCREAMING_SNAKE_CASE_(self: int, UpperCAmelCase_: Optional[int]) -> str:
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        lowerCamelCase__: List[Any] = data
        lowerCamelCase__: Dict = (self.rear + 1) % self.n
        self.size += 1
        return self

    def SCREAMING_SNAKE_CASE_(self: Tuple) -> Tuple:
        '''simple docstring'''
        if self.size == 0:
            raise Exception("UNDERFLOW")
        lowerCamelCase__: Optional[Any] = self.array[self.front]
        lowerCamelCase__: Optional[int] = None
        lowerCamelCase__: Dict = (self.front + 1) % self.n
        self.size -= 1
        return temp
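# Usage sketch (hedged): the class above mirrors a fixed-capacity circular queue. Assuming the
# conventional upstream names (CircularQueue, enqueue, dequeue, first, is_empty), typical use
# looks like this; the names are illustrative and not defined by the snippet above.
queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")   # enqueue returns self, so calls chain
print(len(queue), queue.first())               # 3 'a'
print(queue.dequeue())                         # 'a'; front advances modulo the capacity
queue.enqueue("d")                             # the freed slot is reused (wrap-around)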
10
0
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _lowerCAmelCase ( )->Optional[Any]: '''simple docstring''' snake_case_ = argparse.ArgumentParser() parser.add_argument( "-m" , "--pretrained_model_name_or_path" , type=__a , default=__a , required=__a , help="Path to pretrained model or model identifier from huggingface.co/models." , ) parser.add_argument( "-c" , "--caption" , type=__a , default="robotic cat with wings" , help="Text used to generate images." , ) parser.add_argument( "-n" , "--images_num" , type=__a , default=4 , help="How much images to generate." , ) parser.add_argument( "-s" , "--seed" , type=__a , default=42 , help="Seed for random process." , ) parser.add_argument( "-ci" , "--cuda_id" , type=__a , default=0 , help="cuda_id." , ) snake_case_ = parser.parse_args() return args def _lowerCAmelCase ( lowerCAmelCase_ :Any , lowerCAmelCase_ :Dict , lowerCAmelCase_ :Union[str, Any] )->Tuple: '''simple docstring''' if not len(__a ) == rows * cols: raise ValueError("The specified number of rows and columns are not correct." ) snake_case_ = imgs[0].size snake_case_ = Image.new("RGB" , size=(cols * w, rows * h) ) snake_case_ = grid.size for i, img in enumerate(__a ): grid.paste(__a , box=(i % cols * w, i // cols * h) ) return grid def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Any="robotic cat with wings" , lowerCAmelCase_ :List[Any]=7.5 , lowerCAmelCase_ :Optional[int]=50 , lowerCAmelCase_ :str=1 , lowerCAmelCase_ :int=42 , )->str: '''simple docstring''' snake_case_ = torch.Generator(pipeline.device ).manual_seed(__a ) snake_case_ = pipeline( __a , guidance_scale=__a , num_inference_steps=__a , generator=__a , num_images_per_prompt=__a , ).images snake_case_ = int(math.sqrt(__a ) ) snake_case_ = image_grid(__a , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images SCREAMING_SNAKE_CASE :int = parse_args() # Load models and create wrapper for stable diffusion SCREAMING_SNAKE_CASE :Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') SCREAMING_SNAKE_CASE :Dict = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') SCREAMING_SNAKE_CASE :int = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') SCREAMING_SNAKE_CASE :List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') SCREAMING_SNAKE_CASE :str = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) SCREAMING_SNAKE_CASE :int = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): SCREAMING_SNAKE_CASE :Dict = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: SCREAMING_SNAKE_CASE :Tuple = unet.to(torch.device('''cuda''', args.cuda_id)) SCREAMING_SNAKE_CASE :List[str] = pipeline.to(unet.device) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :int = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) 
SCREAMING_SNAKE_CASE :List[str] = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
159
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def lowerCAmelCase_ ( __a ) -> YolosConfig: """simple docstring""" lowerCamelCase__: str =YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCamelCase__: int =192 lowerCamelCase__: Optional[int] =768 lowerCamelCase__: Any =12 lowerCamelCase__: str =3 lowerCamelCase__: Optional[int] =[800, 1333] lowerCamelCase__: Union[str, Any] =False elif yolos_name == "yolos_s_dWr": lowerCamelCase__: int =330 lowerCamelCase__: Optional[Any] =14 lowerCamelCase__: Any =6 lowerCamelCase__: List[str] =1320 elif "yolos_s" in yolos_name: lowerCamelCase__: List[str] =384 lowerCamelCase__: Union[str, Any] =1536 lowerCamelCase__: List[Any] =12 lowerCamelCase__: Any =6 elif "yolos_b" in yolos_name: lowerCamelCase__: str =[800, 1344] lowerCamelCase__: int =91 lowerCamelCase__: str ="huggingface/label-files" lowerCamelCase__: List[str] ="coco-detection-id2label.json" lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()} lowerCamelCase__: List[str] =idalabel lowerCamelCase__: int ={v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :] lowerCamelCase__: str =in_proj_bias[: config.hidden_size] lowerCamelCase__: str =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__: str =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :] lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if "backbone" in name: lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" ) if "cls_token" in name: lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" ) if "norm1" in name: lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowerCamelCase__: int =name.replace("norm2" , 
"layernorm_after" ) if "mlp.fc1" in name: lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" ) return name def lowerCAmelCase_ ( __a , __a ) -> dict: """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__: Any =orig_state_dict.pop(__a ) if "qkv" in key: lowerCamelCase__: Tuple =key.split("." ) lowerCamelCase__: List[str] =int(key_split[2] ) lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCamelCase__: int =val[:dim, :] lowerCamelCase__: str =val[ dim : dim * 2, : ] lowerCamelCase__: Any =val[-dim:, :] else: lowerCamelCase__: Tuple =val[:dim] lowerCamelCase__: Optional[Any] =val[dim : dim * 2] lowerCamelCase__: str =val[-dim:] else: lowerCamelCase__: Dict =val return orig_state_dict def lowerCAmelCase_ ( ) -> torch.Tensor: """simple docstring""" lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]: """simple docstring""" lowerCamelCase__: int =get_yolos_config(__a ) # load original state_dict lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"] # load 🤗 model lowerCamelCase__: int =YolosForObjectDetection(__a ) model.eval() lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a ) model.load_state_dict(__a ) # Check outputs on an image, prepared by YolosImageProcessor lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512 lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a ) lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" ) lowerCamelCase__: Tuple =model(**__a ) lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes lowerCamelCase__ , lowerCamelCase__: Any =None, None if yolos_name == "yolos_ti": lowerCamelCase__: Optional[Any] =torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCamelCase__: List[Any] =torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCamelCase__: Optional[int] =torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCamelCase__: Any =torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCamelCase__: str =torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCamelCase__: Optional[Any] =torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCamelCase__: str =torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], 
[-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCamelCase__: Union[str, Any] =torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCamelCase__: Tuple =torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCamelCase__: Optional[int] =torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 ) Path(__a ).mkdir(exist_ok=__a ) print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__a ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__a ) if push_to_hub: lowerCamelCase__: Any ={ "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) lowerCamelCase__: Optional[int] =model_mapping[yolos_name] image_processor.push_to_hub(__a , organization="hustvl" ) model.push_to_hub(__a , organization="hustvl" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
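# A hedged sketch of driving the YOLOS converter above programmatically, mirroring its
# __main__ block (which exposes the entry point as convert_yolos_checkpoint). The
# checkpoint and output paths below are placeholders, not real files.
convert_yolos_checkpoint(
    "yolos_s_200_pre",               # model variant, one of the names handled above
    "/path/to/yolos_s_200_pre.pth",  # original YOLOS state dict (.pth)
    "/path/to/output_dir",           # where the converted model and processor are written
    False,                           # push_to_hub
)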
from __future__ import annotations


def UpperCAmelCase_(__snake_case) -> bool:
    """Return True when every element of the input sequence is unique."""
    return len(set(__snake_case)) == len(__snake_case)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Count the hollow square laminae that can be formed with up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
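# Sanity check against the worked example from the problem statement: with at most
# one hundred tiles, forty-one different hollow square laminae can be formed.
assert solution(100) == 41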
def solution(n: int = 1000) -> int:
    """Sum 2 * a * ((a - 1) // 2) over a in the range 3..n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Union[str, Any] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Union[str, Any] =tmp_path / "cache" lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Dict: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: str =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Optional[Any] =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Optional[Any] =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a 
, __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: List[str] =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =features.copy() if features else default_expected_features lowerCamelCase__: Union[str, Any] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" if split: lowerCamelCase__: Union[str, Any] ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: Optional[int] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: int =Features({"image": Image()} ) lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Any: """simple docstring""" assert get_writer_batch_size(__a ) == expected
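# A minimal round-trip sketch using the same ParquetDatasetWriter / ParquetDatasetReader
# helpers the tests above exercise; the output path is a placeholder.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
assert ParquetDatasetWriter(ds, "/tmp/example.parquet").write() > 0
reloaded = ParquetDatasetReader("/tmp/example.parquet").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]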
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply max pooling with a square window of `size`, moved by `stride`."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply average pooling with a square window of `size`, moved by `stride`."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
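# A small worked example for the pooling helpers above; the 4x4 input and the
# expected outputs were checked by hand.
import numpy as np

example = np.arange(1, 17).reshape(4, 4)      # values 1..16
print(maxpooling(example, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(example, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]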
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
'''simple docstring''' import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() a : Optional[int] = logging.get_logger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = os.path.abspath(__a ) logger.info(F"Converting TensorFlow checkpoint from {tf_path}" ) # Load weights from TF model UpperCAmelCase : Any = tf.train.list_variables(__a ) UpperCAmelCase : Dict = [] UpperCAmelCase : List[Any] = [] UpperCAmelCase : Optional[int] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") UpperCAmelCase : List[str] = full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"Skipping non-model layer {full_name}" ) continue if "optimizer" in full_name: logger.info(F"Skipping optimization layer {full_name}" ) continue if name[0] == "model": # ignore initial 'model' UpperCAmelCase : int = name[1:] # figure out how many levels deep the name is UpperCAmelCase : Optional[int] = 0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(__a ) # read data UpperCAmelCase : List[Any] = tf.train.load_variable(__a , __a ) names.append("/".join(__a ) ) arrays.append(__a ) logger.info(F"Read a total of {len(__a ):,} layers" ) # Sanity check if len(set(__a ) ) != 1: raise ValueError(F"Found layer names with different depths (layer depth {list(set(__a ) )})" ) UpperCAmelCase : Optional[Any] = list(set(__a ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." 
) for full_name, array in zip(__a , __a ): UpperCAmelCase : Tuple = full_name.split("/" ) UpperCAmelCase : Union[str, Any] = model UpperCAmelCase : Dict = [] for i, m_name in enumerate(__a ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): UpperCAmelCase : Optional[Any] = int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) UpperCAmelCase : Optional[Any] = getattr(__a , "embeddings" ) UpperCAmelCase : Union[str, Any] = getattr(__a , "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) UpperCAmelCase : Any = getattr(__a , "encoder" ) UpperCAmelCase : str = getattr(__a , "layer" ) UpperCAmelCase : Optional[Any] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) UpperCAmelCase : Any = getattr(__a , "pooler" ) UpperCAmelCase : Tuple = getattr(__a , "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) UpperCAmelCase : List[Any] = getattr(__a , "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) UpperCAmelCase : List[str] = getattr(__a , "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) UpperCAmelCase : str = getattr(__a , "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) UpperCAmelCase : Dict = getattr(__a , "token_type_embeddings" ) else: raise ValueError(F"Unknown embedding layer with name {full_name}" ) trace.append("weight" ) UpperCAmelCase : Optional[int] = getattr(__a , "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) UpperCAmelCase : Any = getattr(__a , "attention" ) UpperCAmelCase : Tuple = getattr(__a , "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) UpperCAmelCase : Optional[int] = getattr(__a , "attention" ) UpperCAmelCase : Any = getattr(__a , "output" ) UpperCAmelCase : Union[str, Any] = getattr(__a , "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) UpperCAmelCase : List[str] = getattr(__a , "attention" ) UpperCAmelCase : Dict = getattr(__a , "output" ) UpperCAmelCase : str = getattr(__a , "dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) UpperCAmelCase : Optional[Any] = getattr(__a , "output" ) UpperCAmelCase : Tuple = getattr(__a , "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) UpperCAmelCase : List[Any] = getattr(__a , "output" ) UpperCAmelCase : Optional[Any] = getattr(__a , "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) UpperCAmelCase : List[Any] = getattr(__a , "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) UpperCAmelCase : Tuple = getattr(__a , "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) UpperCAmelCase : Any = getattr(__a , "value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) UpperCAmelCase : Union[str, Any] = getattr(__a , "intermediate" ) UpperCAmelCase 
: List[str] = getattr(__a , "dense" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("output" ) UpperCAmelCase : List[Any] = getattr(__a , "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) UpperCAmelCase : Any = getattr(__a , "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) UpperCAmelCase : Any = getattr(__a , "weight" ) else: logger.warning(F"Ignored {m_name}" ) # for certain layers reshape is necessary UpperCAmelCase : Dict = ".".join(__a ) if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , __a ) or re.match( R"(\S+)\.attention\.output\.dense\.weight" , __a ): UpperCAmelCase : str = array.reshape(pointer.data.shape ) if "kernel" in full_name: UpperCAmelCase : Tuple = array.transpose() if pointer.shape == array.shape: UpperCAmelCase : Optional[int] = torch.from_numpy(__a ) else: raise ValueError( F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:" F" {array.shape}" ) logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" ) return model def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' logger.info(F"Loading model based on config from {config_path}..." ) UpperCAmelCase : str = BertConfig.from_json_file(__a ) UpperCAmelCase : Union[str, Any] = BertModel(__a ) # Load weights from checkpoint logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}..." ) load_tfa_weights_in_bert(__a , __a , __a ) # Save pytorch-model logger.info(F"Saving PyTorch model to {pytorch_dump_path}..." ) torch.save(model.state_dict() , __a ) if __name__ == "__main__": a : str = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model (must include filename).", ) a : Union[str, Any] = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
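# Hypothetical programmatic use of the converter defined above, mirroring its
# __main__ block; all three paths are placeholders, not real files.
convert_tfa_checkpoint_to_pytorch(
    "/path/to/tf2_checkpoint",     # TensorFlow 2.x checkpoint prefix
    "/path/to/bert_config.json",   # BERT config describing the architecture
    "/path/to/pytorch_model.bin",  # output file for the converted weights
)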
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") requires_backends(self , "vision") self.check_model_type(UpperCAmelCase_) def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]: '''simple docstring''' if "text_queries" in kwargs: lowerCamelCase__: Any =kwargs.pop("text_queries") if isinstance(UpperCAmelCase_ , (str, Image.Image)): lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels} else: lowerCamelCase__: Any =image lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) return results def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' lowerCamelCase__: List[str] ={} if "threshold" in kwargs: lowerCamelCase__: List[Any] =kwargs["threshold"] if "top_k" in kwargs: lowerCamelCase__: Any =kwargs["top_k"] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =load_image(inputs["image"]) lowerCamelCase__: Dict =inputs["candidate_labels"] if isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Any =candidate_labels.split(",") lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(UpperCAmelCase_): lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework) lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework) yield { "is_last": i == len(UpperCAmelCase_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model_inputs.pop("target_size") lowerCamelCase__: Dict =model_inputs.pop("candidate_label") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple: '''simple docstring''' lowerCamelCase__: Union[str, Any] =[] for model_output in model_outputs: lowerCamelCase__: Optional[Any] =model_output["candidate_label"] lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_) lowerCamelCase__: Dict =self.image_processor.post_process_object_detection( 
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0] for index in outputs["scores"].nonzero(): lowerCamelCase__: Dict =outputs["scores"][index].item() lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0]) lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box} results.append(UpperCAmelCase_) lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: x["score"] , reverse=UpperCAmelCase_) if top_k: lowerCamelCase__: Dict =results[:top_k] return results def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist() lowerCamelCase__: Optional[int] ={ "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
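# A hedged usage sketch for the pipeline above. In practice it is obtained through the
# pipeline() factory; the OWL-ViT checkpoint named here is an assumption for illustration,
# not something this file pins down.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# Each prediction is a dict with "score", "label" and "box" keys, as assembled in postprocess() above.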
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
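# A hedged sketch of aligning the task template with a dataset's ClassLabel feature,
# assuming the class above is importable as datasets.tasks.ImageClassification; the
# column names and label classes below are illustrative only.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema)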
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = (DDPMParallelScheduler,) def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' lowerCamelCase__: Any ={ "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase_) return config def SCREAMING_SNAKE_CASE_ (self : int) ->Dict: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple: '''simple docstring''' self.check_over_configs(thresholding=UpperCAmelCase_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , ) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->int: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str: '''simple docstring''' lowerCamelCase__: Dict =self.scheduler_classes[0] lowerCamelCase__: Tuple =self.get_scheduler_config() lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' lowerCamelCase__: int =self.scheduler_classes[0] lowerCamelCase__: Tuple =self.get_scheduler_config() lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: str =len(UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.dummy_model() lowerCamelCase__: int =self.dummy_sample_deter lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1 lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1 lowerCamelCase__: Optional[Any] =samplea.shape[0] lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0) lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_) lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , 
timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 1153.1833) < 1E-2 assert abs(result_mean.item() - 0.5005) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any =self.scheduler_classes[0] lowerCamelCase__: Optional[Any] =self.get_scheduler_config() lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.dummy_model() lowerCamelCase__: List[Any] =self.dummy_sample_deter lowerCamelCase__: int =torch.manual_seed(0) for t in reversed(range(UpperCAmelCase_)): # 1. predict noise residual lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_) # 2. predict previous mean of sample x_t-1 lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: Any =pred_prev_sample lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 258.9606) < 1E-2 assert abs(result_mean.item() - 0.3372) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction") lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: str =len(UpperCAmelCase_) lowerCamelCase__: str =self.dummy_model() lowerCamelCase__: str =self.dummy_sample_deter lowerCamelCase__: Dict =torch.manual_seed(0) for t in reversed(range(UpperCAmelCase_)): # 1. predict noise residual lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_) # 2. 
predict previous mean of sample x_t-1 lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: List[str] =pred_prev_sample lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 202.0296) < 1E-2 assert abs(result_mean.item() - 0.2631) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: str =self.scheduler_classes[0] lowerCamelCase__: Union[str, Any] =self.get_scheduler_config() lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase_): if i == len(UpperCAmelCase_) - 1: lowerCamelCase__: Dict =-1 else: lowerCamelCase__: Union[str, Any] =timesteps[i + 1] lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_) lowerCamelCase__: str =prev_t.item() self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: List[Any] =self.get_scheduler_config() lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =self.scheduler_classes[0] lowerCamelCase__: Any =self.get_scheduler_config() lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0] lowerCamelCase__: int =len(UpperCAmelCase_) with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: Optional[Any] =self.get_scheduler_config() lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase_)
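# A minimal sketch of the scheduler exercised above, restricted to options the tests
# themselves use (linear betas, custom descending timesteps).
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(scheduler.timesteps)  # tensor([100, 87, 50, 1, 0])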
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model from a config file if one is given, otherwise fall back to the default config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the numpy-based TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other, ignoring case and spaces."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = BartphoTokenizer lowercase_ = False lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple: '''simple docstring''' super().setUp() lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"] lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) lowerCamelCase__: List[Any] ={"unk_token": "<unk>"} lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""") lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str: '''simple docstring''' kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Optional[int] ="This is a là test" lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test" return input_text, output_text def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map) lowerCamelCase__: List[Any] ="This is a là test" lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split() lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token] lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the worldometers page and return the headline COVID-19 counters."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __A = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCamelCase__: Optional[int] ="lm_head" lowerCamelCase__: Dict =getattr(__a , __a ) if weight_type is not None: lowerCamelCase__: str =getattr(__a , __a ).shape else: lowerCamelCase__: int =hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCamelCase__: Dict =value elif weight_type == "weight_g": lowerCamelCase__: Optional[Any] =value elif weight_type == "weight_v": lowerCamelCase__: int =value elif weight_type == "bias": lowerCamelCase__: List[str] =value else: lowerCamelCase__: Union[str, Any] =value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =[] lowerCamelCase__: List[str] =fairseq_model.state_dict() lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase__: int =False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase__: str =True else: for key, mapped_key in MAPPING.items(): lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase__: Optional[Any] =True if "*" in mapped_key: lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." 
)[-2] lowerCamelCase__: List[str] =mapped_key.replace("*" , __a ) if "weight_g" in name: lowerCamelCase__: List[str] ="weight_g" elif "weight_v" in name: lowerCamelCase__: Union[str, Any] ="weight_v" elif "bias" in name: lowerCamelCase__: Dict ="bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase__: Tuple ="weight" else: lowerCamelCase__: List[Any] =None set_recursively(__a , __a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1] lowerCamelCase__: List[str] =name.split("." ) lowerCamelCase__: str =int(items[0] ) lowerCamelCase__: Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCamelCase__: List[str] =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCamelCase__: Dict =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowerCamelCase__: List[Any] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCamelCase__: List[str] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int: """simple docstring""" if config_path is not None: lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a ) else: lowerCamelCase__: List[Any] =UniSpeechConfig() if is_finetuned: if dict_path: lowerCamelCase__: str =Dictionary.load_from_json(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase__: Any =target_dict.pad_index lowerCamelCase__: int =target_dict.bos_index lowerCamelCase__: Any =target_dict.eos_index lowerCamelCase__: Dict =len(target_dict.symbols ) lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" ) if not os.path.isdir(__a ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) ) return os.makedirs(__a , exist_ok=__a ) lowerCamelCase__: Optional[Any] =target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase__: Optional[Any] =42 lowerCamelCase__: List[Any] =43 with open(__a , "w" , encoding="utf-8" ) as vocab_handle: json.dump(__a , __a ) lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer( __a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , ) lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False lowerCamelCase__: Tuple =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , ) lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a ) processor.save_pretrained(__a ) lowerCamelCase__: int =UniSpeechForCTC(__a ) else: lowerCamelCase__: int =UniSpeechForPreTraining(__a ) if is_finetuned: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCamelCase__: List[str] =model[0].eval() recursively_load_weights(__a , __a , __a ) hf_unispeech.save_pretrained(__a ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __A = parser.parse_args() convert_unispeech_checkpoint( 
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
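# Hypothetical programmatic call matching the __main__ block above; every path is a
# placeholder. The arguments are positional because of the obfuscated signature.
convert_unispeech_checkpoint(
    "/path/to/unispeech_fairseq_checkpoint.pt",  # fairseq checkpoint
    "/path/to/output_dir",                       # output directory for the HF model
    None,   # optional path to a config.json
    None,   # optional fairseq dict (only needed for fine-tuned models)
    False,  # is_finetuned: False converts to a UniSpeechForPreTraining model
)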
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Fetch the current price of `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
from typing import Any def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list: """simple docstring""" _validation( __a , __a , __a , __a , __a , ) # Creates data structures and fill initial step lowerCamelCase__: dict ={} lowerCamelCase__: dict ={} for state in states_space: lowerCamelCase__: Optional[Any] =observations_space[0] lowerCamelCase__: List[Any] =( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase__: int =None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__a ) ): lowerCamelCase__: Tuple =observations_space[o] lowerCamelCase__: Optional[Any] =observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase__: Tuple ="" lowerCamelCase__: Optional[Any] =-1 for k_state in states_space: lowerCamelCase__: int =( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase__: List[str] =probability lowerCamelCase__: int =k_state # Update probabilities and pointers dicts lowerCamelCase__: Any =( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase__: int =arg_max # The final observation lowerCamelCase__: Any =observations_space[len(__a ) - 1] # argmax for given final observation lowerCamelCase__: Optional[Any] ="" lowerCamelCase__: int =-1 for k_state in states_space: lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase__: List[Any] =probability lowerCamelCase__: Dict =k_state lowerCamelCase__: str =arg_max # Process pointers backwards lowerCamelCase__: Union[str, Any] =last_state lowerCamelCase__: List[str] =[] for o in range(len(__a ) - 1 , -1 , -1 ): result.append(__a ) lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]] result.reverse() return result def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None: """simple docstring""" _validate_not_empty( __a , __a , __a , __a , __a , ) _validate_lists(__a , __a ) _validate_dicts( __a , __a , __a ) def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None: """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowerCAmelCase_ ( __a , __a ) -> None: """simple docstring""" _validate_list(__a , "observations_space" ) _validate_list(__a , "states_space" ) def lowerCAmelCase_ ( __a , __a ) -> None: """simple docstring""" if not isinstance(_object , __a ): lowerCamelCase__: Tuple =F"""{var_name} must be a list""" raise ValueError(__a ) else: for x in _object: if not isinstance(__a , __a ): lowerCamelCase__: str =F"""{var_name} must be a list of strings""" raise ValueError(__a ) def lowerCAmelCase_ ( __a , __a , __a , ) -> None: """simple docstring""" _validate_dict(__a , "initial_probabilities" , __a ) _validate_nested_dict(__a , "transition_probabilities" ) _validate_nested_dict(__a , "emission_probabilities" ) def lowerCAmelCase_ ( __a , __a ) -> None: """simple docstring""" _validate_dict(_object , __a , __a ) for x in _object.values(): _validate_dict(__a , __a , __a , __a ) def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None: """simple docstring""" if not isinstance(_object , __a ): 
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict""" raise ValueError(__a ) if not all(isinstance(__a , __a ) for x in _object ): lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings""" raise ValueError(__a ) if not all(isinstance(__a , __a ) for x in _object.values() ): lowerCamelCase__: Dict ="nested dictionary " if nested else "" lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(__a ) if __name__ == "__main__": from doctest import testmod testmod()
10
0
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Optional[int] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) lowerCamelCase : List[Any] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above lowerCamelCase : str = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above lowerCamelCase : Any = tf_top_k_top_p_filtering(UpperCAmelCase_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) lowerCamelCase : List[str] = output[output != -float("inf" )] lowerCamelCase : List[Any] = tf.cast( tf.where(tf.not_equal(UpperCAmelCase_ , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_ ) @require_tf class UpperCamelCase__ (unittest.TestCase , __SCREAMING_SNAKE_CASE ): '''simple docstring''' if is_tf_available(): lowerCamelCase_ : List[str] = { """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def _lowercase ( self ) -> Dict: lowerCamelCase : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase : Any = 2 lowerCamelCase : List[Any] = 2 class UpperCamelCase__ (tf.Module ): '''simple docstring''' def __init__( self , UpperCamelCase__ ) -> Union[str, Any]: super(UpperCAmelCase_ , self ).__init__() lowerCamelCase : Union[str, Any] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ), tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ), ) , jit_compile=UpperCAmelCase_ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Any = self.model.generate( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ , return_dict_in_generate=UpperCAmelCase_ , ) return {"sequences": outputs["sequences"]} lowerCamelCase : Dict = [[2, 0], [102, 103]] lowerCamelCase : List[Any] = [[1, 0], [1, 1]] lowerCamelCase : Union[str, Any] = DummyModel(model=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": dummy_model.serving} ) lowerCamelCase : Tuple = tf.saved_model.load(UpperCAmelCase_ ).signatures["serving_default"] for batch_size in range(1 , len(UpperCAmelCase_ ) + 1 ): lowerCamelCase : Optional[Any] = { "input_ids": tf.constant(dummy_input_ids[:batch_size] ), "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ), } lowerCamelCase : int = serving_func(**UpperCAmelCase_ )["sequences"] lowerCamelCase : int = test_model.generate(**UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ ) tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def _lowercase ( self ) -> Dict: lowerCamelCase : Tuple = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase : Tuple = 1 lowerCamelCase : 
int = 2 class UpperCamelCase__ (tf.Module ): '''simple docstring''' def __init__( self , UpperCamelCase__ ) -> Tuple: super(UpperCAmelCase_ , self ).__init__() lowerCamelCase : str = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ), ) , jit_compile=UpperCAmelCase_ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Dict = self.model.generate( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ , return_dict_in_generate=UpperCAmelCase_ , ) return {"sequences": outputs["sequences"]} lowerCamelCase : Dict = [[2], [102, 103]] lowerCamelCase : Tuple = [[1], [1, 1]] lowerCamelCase : List[str] = DummyModel(model=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": dummy_model.serving} ) lowerCamelCase : Dict = tf.saved_model.load(UpperCAmelCase_ ).signatures["serving_default"] for input_row in range(len(UpperCAmelCase_ ) ): lowerCamelCase : Optional[Any] = { "input_ids": tf.constant([dummy_input_ids[input_row]] ), "attention_mask": tf.constant([dummy_attention_masks[input_row]] ), } lowerCamelCase : List[Any] = serving_func(**UpperCAmelCase_ )["sequences"] lowerCamelCase : Optional[Any] = test_model.generate(**UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ ) tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_ ) @slow @require_tensorflow_text def _lowercase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=UpperCAmelCase_ ) class UpperCamelCase__ (tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ) -> Optional[int]: super().__init__() lowerCamelCase : Dict = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase_ , "spiece.model" ) , "rb" ).read() ) lowerCamelCase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" ) def _lowercase ( self , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : Any = self.tokenizer.tokenize(UpperCAmelCase_ ) lowerCamelCase : Dict = text.pad_model_inputs( UpperCAmelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) lowerCamelCase : Dict = self.model.generate(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) return self.tokenizer.detokenize(UpperCAmelCase_ ) lowerCamelCase : int = CompleteSentenceTransformer() lowerCamelCase : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" ) lowerCamelCase : Optional[Any] = complete_model(UpperCAmelCase_ ) lowerCamelCase : Optional[int] = tf.keras.Model(UpperCAmelCase_ , UpperCAmelCase_ ) keras_model.save(UpperCAmelCase_ ) def _lowercase ( self ) -> int: lowerCamelCase : str = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } lowerCamelCase : Any = 14 lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase : int = "Hello, my dog is cute and" lowerCamelCase : Optional[int] = tokenizer(UpperCAmelCase_ , return_tensors="tf" ) lowerCamelCase : Tuple = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowerCamelCase : Union[str, Any] = 638 # forces the generation to happen on CPU, to avoid GPU-related 
quirks with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) lowerCamelCase : int = model.generate(**UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowerCamelCase : List[str] = [638, 198] with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) lowerCamelCase : Dict = model.generate(**UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" ) lowerCamelCase : Optional[int] = "Hugging Face is a technology company based in New York and Paris." lowerCamelCase : Any = bart_tokenizer(UpperCAmelCase_ , return_tensors="tf" ).input_ids lowerCamelCase : Optional[int] = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" ) lowerCamelCase : Optional[int] = bart_model.generate(UpperCAmelCase_ ).numpy() class UpperCamelCase__ (__SCREAMING_SNAKE_CASE ): '''simple docstring''' def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Any: return super().call(UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCamelCase : Optional[Any] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" ) lowerCamelCase : Union[str, Any] = bart_model.generate(UpperCAmelCase_ , foo="bar" ).numpy() self.assertTrue(np.array_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) class UpperCamelCase__ (bart_model.model.encoder.__class__ ): '''simple docstring''' def _lowercase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]: return super().call(UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCamelCase : Union[str, Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) lowerCamelCase : int = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowerCamelCase : Any = bart_model.generate(UpperCAmelCase_ ).numpy() with self.assertRaises(UpperCAmelCase_ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase_ , foo="bar" )
48
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "unispeech" def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =hidden_size lowerCamelCase__: List[str] =feat_extract_norm lowerCamelCase__: Dict =feat_extract_activation lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_) lowerCamelCase__: Any =list(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_) lowerCamelCase__: Dict =conv_bias lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings lowerCamelCase__: Dict =num_conv_pos_embedding_groups lowerCamelCase__: int =len(self.conv_dim) lowerCamelCase__: Union[str, Any] =num_hidden_layers lowerCamelCase__: Union[str, Any] =intermediate_size lowerCamelCase__: Dict =hidden_act lowerCamelCase__: List[Any] =num_attention_heads lowerCamelCase__: Dict =hidden_dropout lowerCamelCase__: Optional[Any] =attention_dropout lowerCamelCase__: Optional[Any] =activation_dropout lowerCamelCase__: Tuple =feat_proj_dropout lowerCamelCase__: int =final_dropout lowerCamelCase__: Optional[Any] =layerdrop lowerCamelCase__: Dict =layer_norm_eps lowerCamelCase__: Optional[Any] =initializer_range lowerCamelCase__: int =num_ctc_classes lowerCamelCase__: Tuple =vocab_size lowerCamelCase__: Dict =do_stable_layer_norm lowerCamelCase__: List[Any] =use_weighted_layer_sum lowerCamelCase__: Dict 
=classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase__: int =apply_spec_augment lowerCamelCase__: List[str] =mask_time_prob lowerCamelCase__: Union[str, Any] =mask_time_length lowerCamelCase__: List[Any] =mask_time_min_masks lowerCamelCase__: Any =mask_feature_prob lowerCamelCase__: Optional[Any] =mask_feature_length lowerCamelCase__: List[str] =mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCamelCase__: Optional[Any] =num_codevectors_per_group lowerCamelCase__: str =num_codevector_groups lowerCamelCase__: Tuple =contrastive_logits_temperature lowerCamelCase__: int =feat_quantizer_dropout lowerCamelCase__: Any =num_negatives lowerCamelCase__: List[str] =codevector_dim lowerCamelCase__: Union[str, Any] =proj_codevector_dim lowerCamelCase__: Any =diversity_loss_weight # ctc loss lowerCamelCase__: Any =ctc_loss_reduction lowerCamelCase__: Dict =ctc_zero_infinity # pretraining loss lowerCamelCase__: Dict =replace_prob @property def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
10
0
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class __lowerCAmelCase : """simple docstring""" def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" raise NotImplementedError() def lowerCAmelCase__ ( self : Tuple ) -> Any: """simple docstring""" raise NotImplementedError() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self : Dict , _lowerCAmelCase : "AutoTokenizer" , _lowerCAmelCase : bool = False , **_lowerCAmelCase : int ) -> List[Any]: """simple docstring""" snake_case_ = tokenizer snake_case_ = skip_prompt snake_case_ = decode_kwargs # variables used in the streaming process snake_case_ = [] snake_case_ = 0 snake_case_ = True def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : List[Any] ) -> Any: """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("TextStreamer only supports batch size 1" ) elif len(value.shape ) > 1: snake_case_ = value[0] if self.skip_prompt and self.next_tokens_are_prompt: snake_case_ = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) snake_case_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("\n" ): snake_case_ = text[self.print_len :] snake_case_ = [] snake_case_ = 0 # If the last token is a CJK character, we print the characters. elif len(UpperCAmelCase_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): snake_case_ = text[self.print_len :] self.print_len += len(UpperCAmelCase_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: snake_case_ = text[self.print_len : text.rfind(" " ) + 1] self.print_len += len(UpperCAmelCase_ ) self.on_finalized_text(UpperCAmelCase_ ) def lowerCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" if len(self.token_cache ) > 0: snake_case_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) snake_case_ = text[self.print_len :] snake_case_ = [] snake_case_ = 0 else: snake_case_ = "" snake_case_ = True self.on_finalized_text(UpperCAmelCase_ , stream_end=UpperCAmelCase_ ) def lowerCAmelCase__ ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> int: """simple docstring""" print(UpperCAmelCase_ , flush=UpperCAmelCase_ , end="" if not stream_end else None ) def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self : Union[str, Any] , _lowerCAmelCase : "AutoTokenizer" , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[float] = None , **_lowerCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = Queue() snake_case_ = None snake_case_ = timeout def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> Union[str, Any]: """simple docstring""" self.text_queue.put(UpperCAmelCase_ , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : List[str] ) -> Dict: """simple docstring""" return self def lowerCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" snake_case_ = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
159
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (a string in terms of x) starting from the point ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) - 1 = 0 (i.e. the value of e)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
10
0
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G) of the given strand."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
import itertools
import math


def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
10
0
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest square of 1s in a binary matrix, plain recursion."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoised with a DP array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
114
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str: '''simple docstring''' lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30} lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30} lowerCamelCase__: Any =parent lowerCamelCase__: Any =batch_size lowerCamelCase__: Optional[Any] =num_channels lowerCamelCase__: Tuple =min_resolution lowerCamelCase__: Union[str, Any] =max_resolution lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop lowerCamelCase__: Optional[int] =size lowerCamelCase__: str =crop_pct lowerCamelCase__: Any =crop_size lowerCamelCase__: List[str] =do_normalize lowerCamelCase__: List[str] =image_mean lowerCamelCase__: Tuple =image_std def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]: '''simple docstring''' return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = PoolFormerImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE_ (self : str) ->int: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop")) self.assertTrue(hasattr(UpperCAmelCase_ , "size")) self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct")) self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCAmelCase_ , "image_std")) def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]: '''simple docstring''' lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"shortest_edge": 30}) self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30}) lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84}) def 
SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict) # create random PIL images lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image) # Test not batched input lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict: '''simple docstring''' lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray) # Test not batched input lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any: '''simple docstring''' lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) # Test not batched input lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
10
0
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __lowerCAmelCase : lowerCamelCase_ : Tuple = XGLMConfig lowerCamelCase_ : List[str] = {} lowerCamelCase_ : Union[str, Any] = '''gelu''' def __init__(self , __magic_name__ , __magic_name__=14 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=2 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=0.02 , ) -> Optional[int]: '''simple docstring''' snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : List[str] = seq_length snake_case_ : int = is_training snake_case_ : int = use_input_mask snake_case_ : Tuple = use_labels snake_case_ : Any = vocab_size snake_case_ : Dict = d_model snake_case_ : Optional[int] = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = ffn_dim snake_case_ : Optional[int] = activation_function snake_case_ : str = activation_dropout snake_case_ : int = attention_dropout snake_case_ : Optional[Any] = max_position_embeddings snake_case_ : Optional[int] = initializer_range snake_case_ : str = None snake_case_ : List[Any] = 0 snake_case_ : str = 2 snake_case_ : Union[str, Any] = 1 def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) snake_case_ : str = None if self.use_input_mask: snake_case_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : Dict = self.get_config() snake_case_ : Optional[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowerCamelCase (self ) -> int: '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCAmelCase_ , ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : str = self.prepare_config_and_inputs() ( snake_case_ ) : Union[str, Any] = config_and_inputs snake_case_ : str = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase ): lowerCamelCase_ : Optional[int] = (TFXGLMModel, TFXGLMForCausalLM) if 
is_tf_available() else () lowerCamelCase_ : str = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase_ : Any = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase_ : List[Any] = False lowerCamelCase_ : int = False lowerCamelCase_ : str = False def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = TFXGLMModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=UpperCAmelCase_ , n_embd=37 ) def lowerCamelCase (self ) -> str: '''simple docstring''' self.config_tester.run_common_tests() @slow def lowerCamelCase (self ) -> Dict: '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Dict = TFXGLMModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' super().test_resize_token_embeddings() @require_tf class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase (self , __magic_name__=True ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) snake_case_ : Optional[int] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off snake_case_ : Union[str, Any] = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on snake_case_ : List[str] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase_ ) @slow def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) snake_case_ : Tuple = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) snake_case_ : Union[str, Any] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' ) snake_case_ : Dict = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): snake_case_ : str = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ , seed=[7, 0] ) snake_case_ : str = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase_ ) snake_case_ : Union[str, Any] = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @slow def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) snake_case_ : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) snake_case_ : str = "left" # use different length sentences to test batching snake_case_ : List[Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] snake_case_ : Optional[Any] = tokenizer(UpperCAmelCase_ , return_tensors='''tf''' , padding=UpperCAmelCase_ ) snake_case_ : Optional[int] = inputs["input_ids"] snake_case_ : str = model.generate(input_ids=UpperCAmelCase_ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 ) snake_case_ : Tuple = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids snake_case_ : Union[str, Any] = model.generate(input_ids=UpperCAmelCase_ , max_new_tokens=12 ) snake_case_ : Optional[Any] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids snake_case_ : Union[str, Any] = model.generate(input_ids=UpperCAmelCase_ , max_new_tokens=12 ) snake_case_ : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase_ ) snake_case_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase_ ) snake_case_ : int = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , [non_padded_sentence, padded_sentence] )
279
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json" ), }, } __A = { "yjernite/retribert-base-uncased": 512, } __A = { "yjernite/retribert-base-uncased": {"do_lower_case": True}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = RetriBertTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: int =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: List[str] =tokenize_chinese_chars lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: Any =do_lower_case def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=None) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: Tuple =[self.sep_token_id] lowerCamelCase__: Optional[int] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: Tuple =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
10
0
"""Product of selected digits of the Champernowne constant 0.123456789101112..."""


def solution() -> int:
    """Return d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 of the
    fractional part of the Champernowne constant."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
311
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Any: """simple docstring""" if attention_mask is None: lowerCamelCase__: Optional[Any] =np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCamelCase__: Dict =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCamelCase__: Optional[Any] =np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__: Any =np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__: List[str] =np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Any=0.02 , ) ->Optional[int]: '''simple docstring''' lowerCamelCase__: int =parent lowerCamelCase__: List[str] =batch_size lowerCamelCase__: Optional[int] =seq_length lowerCamelCase__: Optional[Any] =is_training lowerCamelCase__: str =use_labels lowerCamelCase__: Optional[Any] =vocab_size lowerCamelCase__: int =hidden_size lowerCamelCase__: Dict =num_hidden_layers lowerCamelCase__: Any =num_attention_heads lowerCamelCase__: str =intermediate_size lowerCamelCase__: int =hidden_act lowerCamelCase__: Tuple =hidden_dropout_prob lowerCamelCase__: List[str] =attention_probs_dropout_prob lowerCamelCase__: Optional[int] =max_position_embeddings lowerCamelCase__: int =eos_token_id lowerCamelCase__: Union[str, Any] =pad_token_id lowerCamelCase__: List[str] =bos_token_id lowerCamelCase__: int =initializer_range def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size) lowerCamelCase__: str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1) lowerCamelCase__: int 
=shift_tokens_right(UpperCAmelCase_ , 1 , 2) lowerCamelCase__: Dict =BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , ) lowerCamelCase__: Any =prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) return config, inputs_dict def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: Dict =self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Optional[Any] =20 lowerCamelCase__: Optional[int] =model_class_name(UpperCAmelCase_) lowerCamelCase__: str =model.encode(inputs_dict["input_ids"]) lowerCamelCase__ , lowerCamelCase__: List[Any] =( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4") lowerCamelCase__: Tuple =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase__: Union[str, Any] =model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4") lowerCamelCase__: Dict =model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: List[Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[str] =20 lowerCamelCase__: Optional[Any] =model_class_name(UpperCAmelCase_) lowerCamelCase__: Any =model.encode(inputs_dict["input_ids"]) lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowerCamelCase__: Optional[int] =jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] 
, (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase__: List[Any] =model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4") lowerCamelCase__: str =model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_) lowerCamelCase__: str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowercase_ = 99 def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' lowerCamelCase__: Union[str, Any] =np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowerCamelCase__: Optional[Any] =input_ids.shape[0] lowerCamelCase__: List[str] =BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data() lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_) lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_) lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->str: '''simple docstring''' lowerCamelCase__: Optional[int] =BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_) lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa) lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa) lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_) lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa) lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2) lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 
1).astype(np.floataa).sum() lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum() self.assertEqual(shifted.shape , input_ids.shape) self.assertEqual(UpperCAmelCase_ , n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0] , 2).all()) @require_flax class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = True lowercase_ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__: List[Any] =FlaxBlenderbotModelTester(self) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_) @jax.jit def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]): return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_) with self.subTest("JIT Enabled"): lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple() self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_)) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_): self.assertEqual(jitted_output.shape , output.shape) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_) lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"]) lowerCamelCase__: int ={ "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]): return model.decode( decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , ) with self.subTest("JIT Enabled"): lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple() 
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_)) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_): self.assertEqual(jitted_output.shape , output.shape) @slow def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]: '''simple docstring''' for model_class_name in self.all_model_classes: lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id lowerCamelCase__: str =model(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.") @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict: '''simple docstring''' lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True} lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_) lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") lowerCamelCase__: Any =["Sam"] lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax") lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic." lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_) assert generated_txt[0].strip() == tgt_text
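# A minimal usage sketch for the generation path exercised by the slow test
# above. It assumes network access to the Hub and that the 400M distilled
# checkpoint can be loaded as Flax weights, as in the test's `from_pretrained`
# call; the generation arguments mirror the test's `generate_kwargs`.
from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration

model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer(["Sam"], return_tensors="jax")
generated = model.generate(**inputs, num_beams=1, min_length=15, max_length=25)
print(tokenizer.batch_decode(generated.sequences, skip_special_tokens=True))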
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
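# A minimal sketch of the interface the test above appears to assume for the
# imported `kruskal` helper: it takes the node count and a list of
# [u, v, weight] edges and returns the edges of a minimum spanning tree. The
# union-find details here are an illustration, not the imported implementation.
def kruskal_sketch(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(node: int) -> int:
        while parent[node] != node:
            parent[node] = parent[parent[node]]  # path halving
            node = parent[node]
        return node

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst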
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = "▁" __A = {"vocab_file": "prophetnet.tokenizer"} __A = { "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } __A = { "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } __A = { "microsoft/xprophetnet-large-wiki100-cased": 512, } def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" lowerCamelCase__: Optional[Any] =collections.OrderedDict() with open(__a , "r" , encoding="utf-8" ) as reader: lowerCamelCase__: int =reader.readlines() for index, token in enumerate(__a ): lowerCamelCase__: List[str] =token.rstrip("\n" ) lowerCamelCase__: List[Any] =index return vocab class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ["input_ids", "attention_mask"] def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None: '''simple docstring''' lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece") raise lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(UpperCAmelCase_)) lowerCamelCase__: Optional[int] =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10): lowerCamelCase__: Optional[int] =F"""[unused{i}]""" lowerCamelCase__: int =5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowerCamelCase__: int =12 lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(UpperCAmelCase_) def __getstate__(self : List[str]) ->Dict: '''simple docstring''' lowerCamelCase__: Optional[int] =self.__dict__.copy() lowerCamelCase__: Dict =None return state def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' lowerCamelCase__: Tuple =d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece") raise # for backward compatibility if not hasattr(self , "sp_model_kwargs"): lowerCamelCase__: Dict ={} lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return ([0] * len(UpperCAmelCase_)) + [1] return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1] def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: Any =[self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def SCREAMING_SNAKE_CASE_ (self : str) ->Dict: '''simple docstring''' return len(self.sp_model) + self.fairseq_offset def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple: '''simple docstring''' lowerCamelCase__: str ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip() return out_string def 
SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase_): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return lowerCamelCase__: List[str] =os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCAmelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCAmelCase_ , "wb") as fi: lowerCamelCase__: Dict =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_) return (out_vocab_file,) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowerCamelCase__: Union[str, Any] =[self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
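# A short usage sketch for the SentencePiece tokenizer defined above (its class
# name is mangled in this dump; the checkpoint name comes from the pretrained
# map at the top of the module). Loading via AutoTokenizer sidesteps the exact
# class name; the `sentencepiece` package and Hub access are assumed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
encoded = tokenizer("Hello world")
print(encoded["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))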
'''simple docstring''' import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self : str , *lowerCamelCase_ : Any , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[Any]=None , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = eval_examples SCREAMING_SNAKE_CASE : Dict = post_process_function def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : str = "eval" ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.eval_dataset if eval_dataset is None else eval_dataset SCREAMING_SNAKE_CASE : Tuple = self.get_eval_dataloader(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. SCREAMING_SNAKE_CASE : Optional[Any] = self.compute_metrics SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop SCREAMING_SNAKE_CASE : List[str] = time.time() try: SCREAMING_SNAKE_CASE : Union[str, Any] = eval_loop( UpperCAmelCase_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , metric_key_prefix=UpperCAmelCase_ , ) finally: SCREAMING_SNAKE_CASE : Any = compute_metrics SCREAMING_SNAKE_CASE : int = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCAmelCase_ , UpperCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default SCREAMING_SNAKE_CASE : Optional[int] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions ) SCREAMING_SNAKE_CASE : Tuple = self.compute_metrics(UpperCAmelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): SCREAMING_SNAKE_CASE : str = metrics.pop(UpperCAmelCase_ ) metrics.update(output.metrics ) else: SCREAMING_SNAKE_CASE : Any = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCAmelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) SCREAMING_SNAKE_CASE : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase_ ) return metrics def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict=None , lowerCamelCase_ : str = "test" ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.get_test_dataloader(UpperCAmelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. 
SCREAMING_SNAKE_CASE : Optional[int] = self.compute_metrics SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop SCREAMING_SNAKE_CASE : Optional[int] = time.time() try: SCREAMING_SNAKE_CASE : Dict = eval_loop( UpperCAmelCase_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , metric_key_prefix=UpperCAmelCase_ , ) finally: SCREAMING_SNAKE_CASE : Optional[int] = compute_metrics SCREAMING_SNAKE_CASE : str = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCAmelCase_ , UpperCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output SCREAMING_SNAKE_CASE : Union[str, Any] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions , """predict""" ) SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics(UpperCAmelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): SCREAMING_SNAKE_CASE : Optional[Any] = metrics.pop(UpperCAmelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase_ )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
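# A quick sanity check for the routine above: Heap's algorithm must yield the
# same multiset of orderings as itertools.permutations (assuming `heaps` from
# the block above is in scope).
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))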
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __lowerCamelCase = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class A__ ( __SCREAMING_SNAKE_CASE ): def __init__( self , **UpperCamelCase__ ) -> Any: '''simple docstring''' super().__init__(**UpperCAmelCase_ ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , """vision""" ) self.check_model_type(UpperCAmelCase_ ) def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Union[str, Any]: '''simple docstring''' if "text_queries" in kwargs: A_ = kwargs.pop("""text_queries""" ) if isinstance(UpperCAmelCase_ , (str, Image.Image) ): A_ = {"image": image, "candidate_labels": candidate_labels} else: A_ = image A_ = super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ ) return results def snake_case_ ( self , **UpperCamelCase__ ) -> Dict: '''simple docstring''' A_ = {} if "threshold" in kwargs: A_ = kwargs["threshold"] if "top_k" in kwargs: A_ = kwargs["top_k"] return {}, {}, postprocess_params def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' A_ = load_image(inputs["""image"""] ) A_ = inputs["candidate_labels"] if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): A_ = candidate_labels.split(""",""" ) A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCAmelCase_ ): A_ = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework ) A_ = self.image_processor(UpperCAmelCase_ , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase_ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' A_ = model_inputs.pop("""target_size""" ) A_ = model_inputs.pop("""candidate_label""" ) A_ = model_inputs.pop("""is_last""" ) A_ = self.model(**UpperCAmelCase_ ) A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0.1 , UpperCamelCase__=None ) -> Tuple: '''simple docstring''' A_ = [] for model_output in model_outputs: A_ = model_output["candidate_label"] A_ = BaseModelOutput(UpperCAmelCase_ ) A_ = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["""target_size"""] )[0] for index in outputs["scores"].nonzero(): A_ = outputs["scores"][index].item() A_ = self._get_bounding_box(outputs["""boxes"""][index][0] ) A_ = {"score": score, "label": label, "box": box} results.append(UpperCAmelCase_ ) A_ = sorted(UpperCAmelCase_ , key=lambda UpperCamelCase__ : x["score"] , reverse=UpperCAmelCase_ ) if top_k: A_ = results[:top_k] return results def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" ) A_ = 
box.int().tolist() A_ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
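# A hedged usage sketch for the zero-shot object detection pipeline implemented
# above. The OWL-ViT checkpoint is an assumption (any model registered for this
# task should work); the output keys match the ones built in `postprocess`.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])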
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) lowercase_ = Features({"image": Image()} ) lowercase_ = Features({"labels": ClassLabel} ) lowercase_ = "image" lowercase_ = "labels" def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple: '''simple docstring''' if self.label_column not in features: raise ValueError(F"""Column {self.label_column} is not present in features.""") if not isinstance(features[self.label_column] , UpperCAmelCase_): raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""") lowerCamelCase__: List[Any] =copy.deepcopy(self) lowerCamelCase__: Optional[int] =self.label_schema.copy() lowerCamelCase__: int =features[self.label_column] lowerCamelCase__: int =label_schema return task_template @property def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]: '''simple docstring''' return { self.image_column: "image", self.label_column: "labels", }
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class A( __SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = 42 class A( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' @register_to_config def __init__( self : str , A_ : int = 65536 , A_ : Optional[int] = None , A_ : int = 2 , A_ : int = 2 , A_ : int = 0 , A_ : str = "fourier" , A_ : bool = True , A_ : bool = False , A_ : float = 0.0 , A_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ : Tuple[str] = "UNetMidBlock1D" , A_ : str = None , A_ : Tuple[int] = (32, 32, 64) , A_ : str = None , A_ : int = 8 , A_ : int = 1 , A_ : bool = False , ) -> Tuple: """simple docstring""" super().__init__() lowerCamelCase_ = sample_size # time if time_embedding_type == "fourier": lowerCamelCase_ = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_ ) lowerCamelCase_ = 2 * block_out_channels[0] elif time_embedding_type == "positional": lowerCamelCase_ = Timesteps( block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_ ) lowerCamelCase_ = block_out_channels[0] if use_timestep_embedding: lowerCamelCase_ = block_out_channels[0] * 4 lowerCamelCase_ = TimestepEmbedding( in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , ) lowerCamelCase_ = nn.ModuleList([] ) lowerCamelCase_ = None lowerCamelCase_ = nn.ModuleList([] ) lowerCamelCase_ = None # down lowerCamelCase_ = in_channels for i, down_block_type in enumerate(UpperCAmelCase_ ): lowerCamelCase_ = output_channel lowerCamelCase_ = block_out_channels[i] if i == 0: input_channel += extra_in_channels lowerCamelCase_ = i == len(UpperCAmelCase_ ) - 1 lowerCamelCase_ = get_down_block( UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(UpperCAmelCase_ ) # mid lowerCamelCase_ = get_mid_block( UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , ) # up lowerCamelCase_ = list(reversed(UpperCAmelCase_ ) ) lowerCamelCase_ = reversed_block_out_channels[0] if out_block_type is None: lowerCamelCase_ = out_channels else: lowerCamelCase_ = block_out_channels[0] for i, up_block_type in enumerate(UpperCAmelCase_ ): lowerCamelCase_ = output_channel lowerCamelCase_ = ( reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_ ) - 1 else final_upsample_channels ) lowerCamelCase_ = i == len(UpperCAmelCase_ ) - 1 lowerCamelCase_ = get_up_block( UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(UpperCAmelCase_ ) lowerCamelCase_ = 
output_channel # out lowerCamelCase_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) lowerCamelCase_ = get_out_block( out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , ) def a__ ( self : Dict , A_ : torch.FloatTensor , A_ : Union[torch.Tensor, float, int] , A_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: """simple docstring""" lowerCamelCase_ = timestep if not torch.is_tensor(UpperCAmelCase_ ): lowerCamelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0: lowerCamelCase_ = timesteps[None].to(sample.device ) lowerCamelCase_ = self.time_proj(UpperCAmelCase_ ) if self.config.use_timestep_embedding: lowerCamelCase_ = self.time_mlp(UpperCAmelCase_ ) else: lowerCamelCase_ = timestep_embed[..., None] lowerCamelCase_ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) lowerCamelCase_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down lowerCamelCase_ = () for downsample_block in self.down_blocks: lowerCamelCase_ = downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: lowerCamelCase_ = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): lowerCamelCase_ = down_block_res_samples[-1:] lowerCamelCase_ = down_block_res_samples[:-1] lowerCamelCase_ = upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_ ) # 5. post-process if self.out_block: lowerCamelCase_ = self.out_block(UpperCAmelCase_ , UpperCAmelCase_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=UpperCAmelCase_ )
import logging from transformers.configuration_utils import PretrainedConfig __A = logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "masked_bert" def __init__(self : Dict , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : List[Any]=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=1E-1_2 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str="topK" , UpperCAmelCase_ : List[str]="constant" , UpperCAmelCase_ : str=0.0 , **UpperCAmelCase_ : int , ) ->List[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Optional[int] =vocab_size lowerCamelCase__: Dict =hidden_size lowerCamelCase__: Optional[int] =num_hidden_layers lowerCamelCase__: Any =num_attention_heads lowerCamelCase__: List[Any] =hidden_act lowerCamelCase__: str =intermediate_size lowerCamelCase__: Dict =hidden_dropout_prob lowerCamelCase__: str =attention_probs_dropout_prob lowerCamelCase__: int =max_position_embeddings lowerCamelCase__: Tuple =type_vocab_size lowerCamelCase__: str =initializer_range lowerCamelCase__: List[Any] =layer_norm_eps lowerCamelCase__: str =pruning_method lowerCamelCase__: Union[str, Any] =mask_init lowerCamelCase__: Optional[Any] =mask_scale
def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : Optional[Any] = 0 # if input_string is "aba" than new_input_string become "a|b|a" lowerCamelCase : List[str] = "" lowerCamelCase : List[str] = "" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(__a ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring lowerCamelCase : List[Any] = 0, 0 # length[i] shows the length of palindromic substring with center i lowerCamelCase : Union[str, Any] = [1 for i in range(len(__a ) )] # for each character in new_string find corresponding palindromic string lowerCamelCase : Tuple = 0 for j in range(len(__a ) ): lowerCamelCase : Tuple = 1 if j > r else min(length[l + r - j] // 2 ,r - j + 1 ) while ( j - k >= 0 and j + k < len(__a ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 lowerCamelCase : Union[str, Any] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: lowerCamelCase : Dict = j - k + 1 # noqa: E741 lowerCamelCase : Union[str, Any] = j + k - 1 # update max_length and start position if max_length < length[j]: lowerCamelCase : Optional[Any] = length[j] lowerCamelCase : int = j # create that string lowerCamelCase : List[str] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
class CircularQueue:
    """Array-backed circular FIFO queue with a fixed capacity of ``n`` items."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element, or False if the queue is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
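# A small usage example for the queue above: capacity 3, FIFO order preserved,
# and the front index wraps around the underlying array as items are removed.
queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")
print(len(queue))       # 3
print(queue.dequeue())  # "a"
queue.enqueue("d")      # reuses the freed slot
print(queue.first())    # "b"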
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def lowerCAmelCase_ ( __a ) -> YolosConfig: """simple docstring""" lowerCamelCase__: str =YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCamelCase__: int =192 lowerCamelCase__: Optional[int] =768 lowerCamelCase__: Any =12 lowerCamelCase__: str =3 lowerCamelCase__: Optional[int] =[800, 1333] lowerCamelCase__: Union[str, Any] =False elif yolos_name == "yolos_s_dWr": lowerCamelCase__: int =330 lowerCamelCase__: Optional[Any] =14 lowerCamelCase__: Any =6 lowerCamelCase__: List[str] =1320 elif "yolos_s" in yolos_name: lowerCamelCase__: List[str] =384 lowerCamelCase__: Union[str, Any] =1536 lowerCamelCase__: List[Any] =12 lowerCamelCase__: Any =6 elif "yolos_b" in yolos_name: lowerCamelCase__: str =[800, 1344] lowerCamelCase__: int =91 lowerCamelCase__: str ="huggingface/label-files" lowerCamelCase__: List[str] ="coco-detection-id2label.json" lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()} lowerCamelCase__: List[str] =idalabel lowerCamelCase__: int ={v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :] lowerCamelCase__: str =in_proj_bias[: config.hidden_size] lowerCamelCase__: str =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__: str =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :] lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if "backbone" in name: lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" ) if "cls_token" in name: lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" ) if "norm1" in name: lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowerCamelCase__: int =name.replace("norm2" , 
"layernorm_after" ) if "mlp.fc1" in name: lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" ) return name def lowerCAmelCase_ ( __a , __a ) -> dict: """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__: Any =orig_state_dict.pop(__a ) if "qkv" in key: lowerCamelCase__: Tuple =key.split("." ) lowerCamelCase__: List[str] =int(key_split[2] ) lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCamelCase__: int =val[:dim, :] lowerCamelCase__: str =val[ dim : dim * 2, : ] lowerCamelCase__: Any =val[-dim:, :] else: lowerCamelCase__: Tuple =val[:dim] lowerCamelCase__: Optional[Any] =val[dim : dim * 2] lowerCamelCase__: str =val[-dim:] else: lowerCamelCase__: Dict =val return orig_state_dict def lowerCAmelCase_ ( ) -> torch.Tensor: """simple docstring""" lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]: """simple docstring""" lowerCamelCase__: int =get_yolos_config(__a ) # load original state_dict lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"] # load 🤗 model lowerCamelCase__: int =YolosForObjectDetection(__a ) model.eval() lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a ) model.load_state_dict(__a ) # Check outputs on an image, prepared by YolosImageProcessor lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512 lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a ) lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" ) lowerCamelCase__: Tuple =model(**__a ) lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes lowerCamelCase__ , lowerCamelCase__: Any =None, None if yolos_name == "yolos_ti": lowerCamelCase__: Optional[Any] =torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCamelCase__: List[Any] =torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCamelCase__: Optional[int] =torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCamelCase__: Any =torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCamelCase__: str =torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCamelCase__: Optional[Any] =torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCamelCase__: str =torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], 
[-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCamelCase__: Union[str, Any] =torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCamelCase__: Tuple =torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCamelCase__: Optional[int] =torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 ) Path(__a ).mkdir(exist_ok=__a ) print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__a ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__a ) if push_to_hub: lowerCamelCase__: Any ={ "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) lowerCamelCase__: Optional[int] =model_mapping[yolos_name] image_processor.push_to_hub(__a , organization="hustvl" ) model.push_to_hub(__a , organization="hustvl" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
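# A hedged usage sketch for a converted checkpoint. The repo id is inferred from
# the `model_mapping` / "hustvl" organization used above; point `from_pretrained`
# at your own --pytorch_dump_folder_path to test a local conversion instead.
import requests
import torch
from PIL import Image
from transformers import YolosForObjectDetection, YolosImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())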
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): SCREAMING_SNAKE_CASE__ = ShapEImgaImgPipeline SCREAMING_SNAKE_CASE__ = ['''image'''] SCREAMING_SNAKE_CASE__ = ['''image'''] SCREAMING_SNAKE_CASE__ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] SCREAMING_SNAKE_CASE__ = False @property def __A (self ) -> Optional[Any]: return 3_2 @property def __A (self ) -> int: return 3_2 @property def __A (self ) -> Dict: return self.time_input_dim * 4 @property def __A (self ) -> Optional[Any]: return 8 @property def __A (self ) -> List[Any]: torch.manual_seed(0 ) _lowercase =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) _lowercase =CLIPVisionModel(UpperCAmelCase_ ) return model @property def __A (self ) -> Tuple: _lowercase =CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , ) return image_processor @property def __A (self ) -> int: torch.manual_seed(0 ) _lowercase ={ "num_attention_heads": 2, "attention_head_dim": 1_6, "embedding_dim": self.time_input_dim, "num_embeddings": 3_2, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "embedding_proj_norm_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } _lowercase =PriorTransformer(**UpperCAmelCase_ ) return model @property def __A (self ) -> List[Any]: torch.manual_seed(0 ) _lowercase ={ "param_shapes": ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 1_2, "background": ( 0.1, 0.1, 0.1, ), } _lowercase =ShapERenderer(**UpperCAmelCase_ ) return model def __A (self ) -> List[str]: _lowercase =self.dummy_prior _lowercase =self.dummy_image_encoder _lowercase =self.dummy_image_processor _lowercase =self.dummy_renderer _lowercase =HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=UpperCAmelCase_ , clip_sample=UpperCAmelCase_ , clip_sample_range=1.0 , ) _lowercase ={ "prior": prior, "image_encoder": image_encoder, "image_processor": image_processor, "renderer": renderer, "scheduler": scheduler, } return components def __A (self , UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[Any]: _lowercase =floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) if str(UpperCAmelCase_ 
).startswith('''mps''' ): _lowercase =torch.manual_seed(UpperCAmelCase_ ) else: _lowercase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) _lowercase ={ "image": input_image, "generator": generator, "num_inference_steps": 1, "frame_size": 3_2, "output_type": "np", } return inputs def __A (self ) -> Optional[Any]: _lowercase ="cpu" _lowercase =self.get_dummy_components() _lowercase =self.pipeline_class(**UpperCAmelCase_ ) _lowercase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase =pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) _lowercase =output.images[0] _lowercase =image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) _lowercase =np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A (self ) -> Optional[int]: self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A (self ) -> Dict: _lowercase =torch_device == "cpu" _lowercase =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , ) def __A (self ) -> Optional[Any]: _lowercase =self.get_dummy_components() _lowercase =self.pipeline_class(**UpperCAmelCase_ ) _lowercase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase =1 _lowercase =2 _lowercase =self.get_dummy_inputs(UpperCAmelCase_ ) for key in inputs.keys(): if key in self.batch_params: _lowercase =batch_size * [inputs[key]] _lowercase =pipe(**UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase): def __A (self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def __A (self ) -> Union[str, Any]: _lowercase =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) _lowercase =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) _lowercase =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) _lowercase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 ) _lowercase =pipe( UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
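# A hedged usage sketch mirroring the slow test above. The class is imported in
# this module as `ShapEImgaImgPipeline`; in released diffusers the public name
# is `ShapEImg2ImgPipeline`, which is what this sketch assumes. A CUDA device is
# assumed because rendering 20 frames is slow on CPU.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)
frames = pipe(
    image,
    generator=generator,
    guidance_scale=3.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images[0]
print(frames.shape)  # the test above expects (20, 64, 64, 3)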
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Count the square laminae that can be formed using up to ``limit`` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
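# A brute-force cross-check of the counting above for a small tile budget: a
# square lamina with outer side ``o`` and hole side ``h`` (same parity,
# 1 <= h <= o - 2) uses o*o - h*h tiles, so simply enumerate them.
def _brute_force_count(limit: int) -> int:
    count = 0
    outer = 3
    while outer * outer - (outer - 2) * (outer - 2) <= limit:
        for hole in range(outer - 2, 0, -2):
            if outer * outer - hole * hole <= limit:
                count += 1
        outer += 1
    return count


assert _brute_force_count(100) == solution(100)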
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Optional[int] = { "configuration_autoformer": [ "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "AutoformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "AutoformerForPrediction", "AutoformerModel", "AutoformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Union[str, Any] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Union[str, Any] =tmp_path / "cache" lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Dict: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: str =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Optional[Any] =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Optional[Any] =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a 
, __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: List[str] =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =features.copy() if features else default_expected_features lowerCamelCase__: Union[str, Any] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" if split: lowerCamelCase__: Union[str, Any] ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: Optional[int] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: int =Features({"image": Image()} ) lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Any: """simple docstring""" assert get_writer_batch_size(__a ) == expected
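# A minimal round-trip sketch with the reader/writer exercised above. Column
# values are arbitrary; the writer returns a positive count on success (the
# tests above only assert it is > 0) and the reader rebuilds a Dataset from the
# Parquet file.
import tempfile
from pathlib import Path

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

with tempfile.TemporaryDirectory() as tmp_dir:
    path = Path(tmp_dir) / "example.parquet"
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(dataset, path).write() > 0
    reloaded = ParquetDatasetReader(str(path), cache_dir=tmp_dir).read()
    print(reloaded.column_names, reloaded.num_rows)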
10
0
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCAmelCase_ = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 2048-bit 1_4: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 3072-bit 1_5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 4096-bit 1_6: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + 
'''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 6144-bit 1_7: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, # 8192-bit 1_8: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + 
'''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=1_6, ), '''generator''': 2, }, } class __lowerCAmelCase : def __init__(self , __magic_name__ = 14 ) -> None: '''simple docstring''' if group not in primes: raise ValueError('''Unsupported Group''' ) snake_case_ : Union[str, Any] = primes[group]["prime"] snake_case_ : Optional[int] = primes[group]["generator"] snake_case_ : Dict = int(hexlify(urandom(32 ) ) , base=16 ) def lowerCamelCase (self ) -> str: '''simple docstring''' return hex(self.__private_key )[2:] def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : str = pow(self.generator , self.__private_key , self.prime ) return hex(UpperCAmelCase_ )[2:] def lowerCamelCase (self , __magic_name__ ) -> bool: '''simple docstring''' return ( 2 <= key <= self.prime - 2 and pow(UpperCAmelCase_ , (self.prime - 1) // 2 , self.prime ) == 1 ) def lowerCamelCase (self , __magic_name__ ) -> str: '''simple docstring''' snake_case_ : int = int(UpperCAmelCase_ , base=16 ) if not self.is_valid_public_key(UpperCAmelCase_ ): raise ValueError('''Invalid public key''' ) snake_case_ : Dict = pow(UpperCAmelCase_ , self.__private_key , self.prime ) return shaaaa(str(UpperCAmelCase_ ).encode() ).hexdigest() @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> bool: '''simple docstring''' return ( 2 <= remote_public_key_str <= prime - 2 and pow(UpperCAmelCase_ , (prime - 1) // 2 , UpperCAmelCase_ ) == 1 ) @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ = 14 ) -> str: '''simple docstring''' snake_case_ : Dict = int(UpperCAmelCase_ , base=16 ) snake_case_ : str = int(UpperCAmelCase_ , base=16 ) snake_case_ : Union[str, Any] = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(UpperCAmelCase_ , UpperCAmelCase_ ): raise ValueError('''Invalid public key''' ) snake_case_ : List[Any] = pow(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return shaaaa(str(UpperCAmelCase_ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
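# A minimal, self-contained sketch of the key exchange the class above implements (RFC 3526,
# group 14 by default). It works directly against the MODP table rather than the class, and it
# assumes that table is bound to the name `primes` (the name the class body reads) and that the
# obfuscated `shaaaa` import stands for `sha256`.
def _example_diffie_hellman_exchange(group: int = 14) -> bool:
    from hashlib import sha256
    from secrets import randbelow

    prime = primes[group]["prime"]
    generator = primes[group]["generator"]
    # Each party draws a private exponent in [2, prime - 2] and publishes generator**private mod prime.
    alice_private = randbelow(prime - 3) + 2
    bob_private = randbelow(prime - 3) + 2
    alice_public = pow(generator, alice_private, prime)
    bob_public = pow(generator, bob_private, prime)
    # Both sides derive the same shared secret and hash it down to a symmetric key.
    alice_key = sha256(str(pow(bob_public, alice_private, prime)).encode()).hexdigest()
    bob_key = sha256(str(pow(alice_public, bob_private, prime)).encode()).hexdigest()
    return alice_key == bob_key  # always True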
279
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
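# Illustrative contents for `utils/documentation_tests.txt` (the paths below are hypothetical
# examples): one repo-relative file or directory per line, kept in alphabetical order so the
# sorted-order check above passes, e.g.
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py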
10
0
def exchange_sort(numbers: list) -> list:
    """Sort the list in place in ascending order by exchanging out-of-order pairs and return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
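# A few illustrative calls showing the expected behaviour of exchange_sort:
#
#   >>> exchange_sort([5, 4, 3, 2, 1])
#   [1, 2, 3, 4, 5]
#   >>> exchange_sort([-5, 4, 0, 2, 1])
#   [-5, 0, 1, 2, 4]
#   >>> exchange_sort([])
#   []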
311
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") requires_backends(self , "vision") self.check_model_type(UpperCAmelCase_) def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]: '''simple docstring''' if "text_queries" in kwargs: lowerCamelCase__: Any =kwargs.pop("text_queries") if isinstance(UpperCAmelCase_ , (str, Image.Image)): lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels} else: lowerCamelCase__: Any =image lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) return results def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' lowerCamelCase__: List[str] ={} if "threshold" in kwargs: lowerCamelCase__: List[Any] =kwargs["threshold"] if "top_k" in kwargs: lowerCamelCase__: Any =kwargs["top_k"] return {}, {}, postprocess_params def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =load_image(inputs["image"]) lowerCamelCase__: Dict =inputs["candidate_labels"] if isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Any =candidate_labels.split(",") lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(UpperCAmelCase_): lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework) lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework) yield { "is_last": i == len(UpperCAmelCase_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model_inputs.pop("target_size") lowerCamelCase__: Dict =model_inputs.pop("candidate_label") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple: '''simple docstring''' lowerCamelCase__: Union[str, Any] =[] for model_output in model_outputs: lowerCamelCase__: Optional[Any] =model_output["candidate_label"] lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_) lowerCamelCase__: Dict =self.image_processor.post_process_object_detection( 
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0] for index in outputs["scores"].nonzero(): lowerCamelCase__: Dict =outputs["scores"][index].item() lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0]) lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box} results.append(UpperCAmelCase_) lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: x["score"] , reverse=UpperCAmelCase_) if top_k: lowerCamelCase__: Dict =results[:top_k] return results def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist() lowerCamelCase__: Optional[int] ={ "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
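# A minimal usage sketch (not part of the pipeline implementation above). The checkpoint name and
# image URL are illustrative assumptions; any OWL-ViT style zero-shot object detection checkpoint
# should work the same way.
def _example_zero_shot_object_detection():
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    # Each prediction is a dict shaped like:
    # {"score": 0.28, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}
    return predictions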
10
0
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among the given environment keys, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Read an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """Read an environment variable as a raw string choice, falling back to `default`."""
    value = os.environ.get(key, str(default))
    return value
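# Illustrative usage of the helpers above; the environment variable names are assumptions.
if __name__ == "__main__":
    world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)
    debug_enabled = parse_flag_from_env("DEBUG_MODE", default=False)
    mixed_precision = parse_choice_from_env("MIXED_PRECISION", default="no")
    print(world_size, debug_enabled, mixed_precision)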
336
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = (DDPMParallelScheduler,) def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' lowerCamelCase__: Any ={ "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase_) return config def SCREAMING_SNAKE_CASE_ (self : int) ->Dict: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple: '''simple docstring''' self.check_over_configs(thresholding=UpperCAmelCase_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , ) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->int: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str: '''simple docstring''' lowerCamelCase__: Dict =self.scheduler_classes[0] lowerCamelCase__: Tuple =self.get_scheduler_config() lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' lowerCamelCase__: int =self.scheduler_classes[0] lowerCamelCase__: Tuple =self.get_scheduler_config() lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: str =len(UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.dummy_model() lowerCamelCase__: int =self.dummy_sample_deter lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1 lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1 lowerCamelCase__: Optional[Any] =samplea.shape[0] lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0) lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_) lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , 
timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 1153.1833) < 1E-2 assert abs(result_mean.item() - 0.5005) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any =self.scheduler_classes[0] lowerCamelCase__: Optional[Any] =self.get_scheduler_config() lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.dummy_model() lowerCamelCase__: List[Any] =self.dummy_sample_deter lowerCamelCase__: int =torch.manual_seed(0) for t in reversed(range(UpperCAmelCase_)): # 1. predict noise residual lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_) # 2. predict previous mean of sample x_t-1 lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: Any =pred_prev_sample lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 258.9606) < 1E-2 assert abs(result_mean.item() - 0.3372) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction") lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: str =len(UpperCAmelCase_) lowerCamelCase__: str =self.dummy_model() lowerCamelCase__: str =self.dummy_sample_deter lowerCamelCase__: Dict =torch.manual_seed(0) for t in reversed(range(UpperCAmelCase_)): # 1. predict noise residual lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_) # 2. 
predict previous mean of sample x_t-1 lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: List[str] =pred_prev_sample lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 202.0296) < 1E-2 assert abs(result_mean.item() - 0.2631) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: str =self.scheduler_classes[0] lowerCamelCase__: Union[str, Any] =self.get_scheduler_config() lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase_): if i == len(UpperCAmelCase_) - 1: lowerCamelCase__: Dict =-1 else: lowerCamelCase__: Union[str, Any] =timesteps[i + 1] lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_) lowerCamelCase__: str =prev_t.item() self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: List[Any] =self.get_scheduler_config() lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =self.scheduler_classes[0] lowerCamelCase__: Any =self.get_scheduler_config() lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0] lowerCamelCase__: int =len(UpperCAmelCase_) with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: Optional[Any] =self.get_scheduler_config() lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase_)
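# A minimal sketch (not one of the tests above) of the standard denoising loop these tests cover,
# with a random tensor standing in for a real UNet's noise prediction.
def _example_ddpm_parallel_loop(num_inference_steps: int = 10) -> torch.Tensor:
    scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(num_inference_steps)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8, generator=generator)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 8, 8, generator=generator)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample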
10
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
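# A tiny sketch of how the --variations dimensions expand into concrete runs (this mirrors the
# itertools.product call in main() above); the variation strings are illustrative.
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   >>> [" ".join(v).strip() for v in itertools.product(*dims)]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']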
11
1
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process lowerCAmelCase__ = logging.getLogger(__name__) lowerCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(a)} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) def _lowerCamelCase ( self) -> Optional[Any]: if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path") @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The name of the dataset to use (via the datasets library)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}) __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=5 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) def _lowerCamelCase ( self) -> Union[str, Any]: if self.train_file is not None: _A : Any = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: _A : int = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Dict ): with open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f: _A : Union[str, Any] = [json.loads(UpperCamelCase__ ) for line in f.read().splitlines() if (len(UpperCamelCase__ ) > 0 and not line.isspace())] assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) _A : Dict = {c: dataset[c] for c in dataset.column_names} _A : str = refs return Dataset.from_dict(UpperCamelCase__ ) def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
_A , _A , _A : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Optional[int] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. _A : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCamelCase__ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _A : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): _A : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"train[:{data_args.validation_split_percentage}%]" , ) _A : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"train[{data_args.validation_split_percentage}%:]" , ) else: _A : int = {} if data_args.train_file is not None: _A : Any = data_args.train_file if data_args.validation_file is not None: _A : List[str] = data_args.validation_file _A : int = data_args.train_file.split("." )[-1] if extension == "txt": _A : Tuple = "text" _A : Dict = load_dataset(UpperCamelCase__ , data_files=UpperCamelCase__ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Dict = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: _A : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **UpperCamelCase__ ) elif model_args.model_name_or_path: _A : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ ) else: _A : Optional[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(f"New config: {config}" ) _A : Optional[int] = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: _A : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCamelCase__ ) elif model_args.model_name_or_path: _A : Any = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: _A : Dict = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A : Tuple = AutoModelForMaskedLM.from_config(UpperCamelCase__ ) model.resize_token_embeddings(len(UpperCamelCase__ ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: _A : str = datasets["train"].column_names else: _A : Tuple = datasets["validation"].column_names _A : Union[str, Any] = "text" if "text" in column_names else column_names[0] _A : List[Any] = "max_length" if data_args.pad_to_max_length else False def tokenize_function(UpperCamelCase__ : Dict ): # Remove empty lines _A : Union[str, Any] = [line for line in examples["text"] if len(UpperCamelCase__ ) > 0 and not line.isspace()] return tokenizer(examples["text"] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=data_args.max_seq_length ) _A : Optional[int] = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: _A : str = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: _A : Tuple = add_chinese_references( tokenized_datasets["validation"] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer _A : Union[str, Any] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: _A : Any = False # Data collator # This one will take care of randomly masking the tokens. 
_A : Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _A : Dict = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , ) # Training if training_args.do_train: if last_checkpoint is not None: _A : Dict = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): _A : Tuple = model_args.model_name_or_path else: _A : Tuple = None _A : str = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[Any] = os.path.join(training_args.output_dir , "train_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCamelCase__ , "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f" {key} = {value}" ) writer.write(f"{key} = {value}\n" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # Evaluation _A : str = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : Optional[Any] = math.exp(eval_output["eval_loss"] ) _A : Dict = perplexity _A : Dict = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(UpperCamelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(f" {key} = {value}" ) writer.write(f"{key} = {value}\n" ) return results def _UpperCAmelCase (UpperCamelCase__ : Tuple ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
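# An illustrative invocation of this whole-word-masking script (paths and model name are
# assumptions, not defaults baked into the code):
#
#   python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file path/to/train.txt \
#     --validation_file path/to/valid.txt \
#     --train_ref_file path/to/train_ref.txt \
#     --validation_ref_file path/to/valid_ref.txt \
#     --do_train \
#     --do_eval \
#     --output_dir /tmp/test-mlm-wwm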
11
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self) -> int: if self.train_file is not None: _A : Optional[int] = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: _A : Dict = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , __lowerCamelCase) -> str: _A : List[Any] = "label" if "label" in features[0].keys() else "labels" _A : Any = [feature.pop(__lowerCamelCase) for feature in features] _A : Optional[int] = len(__lowerCamelCase) _A : int = len(features[0]["input_ids"]) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features ] _A : str = list(chain(*__lowerCamelCase)) _A : Tuple = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()} # Add back labels _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa) return batch def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : int = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _A : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : List[str] = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Tuple = data_args.validation_file _A : Union[str, Any] = data_args.train_file.split("." )[-1] _A : List[str] = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. _A : Union[str, Any] = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : str = [f"ending{i}" for i in range(4 )] _A : Union[str, Any] = "sent1" _A : str = "sent2" if data_args.max_seq_length is None: _A : Any = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." 
) _A : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _A : int = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCamelCase__ : List[Any] ): _A : List[Any] = [[context] * 4 for context in examples[context_name]] _A : Any = examples[question_header_name] _A : Union[str, Any] = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out _A : Dict = list(chain(*UpperCamelCase__ ) ) _A : List[Any] = list(chain(*UpperCamelCase__ ) ) # Tokenize _A : str = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _A : Optional[int] = raw_datasets["train"] if data_args.max_train_samples is not None: _A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) _A : Any = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _A : Optional[int] = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _A : Optional[int] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) _A : Dict = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _A : List[str] = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator _A : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ : Tuple ): _A , _A : List[str] = eval_predictions _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: _A : Any = None if training_args.resume_from_checkpoint is not None: _A : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : int = last_checkpoint _A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Tuple = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) _A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) _A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) _A : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    # Convert a force given as (magnitude, angle) into its x/y components.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # The moment of each force about the origin is the 2D cross product of its
    # position vector and the force vector; equilibrium requires the net moment
    # to be (numerically) zero.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
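# Added illustrative example (a minimal sketch; it assumes it sits at the end of the
# module above so that `array`, `polar_force` and `in_static_equilibrium` are in scope).
# Two equal and opposite forces acting through the same point exert no net moment,
# so the system should be reported as being in static equilibrium.
if __name__ == "__main__":
    demo_forces = array([polar_force(10.0, 0), polar_force(10.0, 180)])
    demo_location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(demo_forces, demo_location)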
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    # Return the maximum sum of any `k` consecutive elements of `array`,
    # using a sliding window over the list.
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
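# Added illustrative example (a minimal sketch; the numbers below are made up purely
# to show the sliding-window behaviour of `max_sum_in_array` defined above).
if __name__ == "__main__":
    sample = [1, -2, 3, 4, -1, 2]
    # Windows of size 3: 1-2+3=2, -2+3+4=5, 3+4-1=6, 4-1+2=5 -> the maximum is 6.
    assert max_sum_in_array(sample, 3) == 6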
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): _A : Optional[int] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Optional[int] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase) _A : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Dict: _A : int = "sgugger/tiny-distilbert-classification" _A : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = "sshleifer/tiny-gpt2" _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase) _A : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def _lowerCamelCase ( self) -> int: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Any = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Any: _A : Union[str, Any] = "sshleifer/tiny-gpt2" _A : Any = AutoConfig.from_pretrained(__lowerCamelCase) # set architectures equal to `None` _A : Dict = None _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : List[Any] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : List[Any] = PyTorchBenchmark(__lowerCamelCase) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> str: _A : List[str] = "sshleifer/tiny-gpt2" _A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : Tuple = "sshleifer/tinier_bart" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> str: _A : List[Any] = "sshleifer/tiny-gpt2" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> int: _A : int = "sshleifer/tinier_bart" _A : str = AutoConfig.from_pretrained(__lowerCamelCase) _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> Dict: _A : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , 
"inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists()) def _lowerCamelCase ( self) -> int: _A : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase): self.assertTrue(hasattr(__lowerCamelCase , "sequential")) self.assertTrue(hasattr(__lowerCamelCase , "cumulative")) self.assertTrue(hasattr(__lowerCamelCase , "current")) self.assertTrue(hasattr(__lowerCamelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : Optional[int] = PyTorchBenchmark(__lowerCamelCase) _A : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
def is_int_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same backwards (negative numbers never do).

    >>> is_int_palindrome(121)
    True
    >>> is_int_palindrome(-121)
    False
    >>> is_int_palindrome(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
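# Added illustrative cross-check (a minimal sketch; it simply compares the arithmetic
# reversal in `is_int_palindrome` above against Python's string reversal on a few values).
if __name__ == "__main__":
    for value in (0, 7, 121, 1221, 1234, 10):
        assert is_int_palindrome(value) == (str(value) == str(value)[::-1])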
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCAmelCase__ = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = NllbTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it _A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token _A : Optional[int] = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) _A : int = vocab_file _A : Optional[Any] = False if not self.vocab_file else True _A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "eng_Latn" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : List[Any] = src_lang _A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Tuple = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , 
__lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding: _A : Tuple = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> str: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : List[str] = [] _A : Dict = [self.eos_token_id, self.cur_lang_code] else: _A : Tuple = [self.cur_lang_code] _A : Optional[Any] = [self.eos_token_id] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : Tuple = [] _A : Any = [self.eos_token_id, self.cur_lang_code] else: _A : Union[str, Any] = [self.cur_lang_code] _A : str = [self.eos_token_id] _A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : Dict = self.convert_ids_to_tokens(self.suffix_tokens) _A : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'albert-base-v1': 5_12, 'albert-large-v1': 5_12, 'albert-xlarge-v1': 5_12, 'albert-xxlarge-v1': 5_12, 'albert-base-v2': 5_12, 'albert-large-v2': 5_12, 'albert-xlarge-v2': 5_12, 'albert-xxlarge-v2': 5_12, } lowerCAmelCase__ = '▁' class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = AlbertTokenizer def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_A : Tuple = ( AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token ) super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) _A : List[str] = do_lower_case _A : Any = remove_space _A : Tuple = keep_accents _A : List[Any] = vocab_file _A : Any = False if not self.vocab_file else True def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Optional[Any] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : str = [self.sep_token_id] _A : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ): _A : Union[str, Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _A : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _A : Dict = format_type def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ): _A : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _A : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ): _A : List[str] = get_format_type_from_alias(UpperCamelCase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCamelCase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
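# Added illustrative usage sketch (an assumption about how the registry above is
# typically exercised, not part of the original module): `get_formatter` resolves an
# alias such as "np" to the formatter class registered for it, provided the optional
# backend is installed.
if __name__ == "__main__":
    numpy_formatter = get_formatter("np")  # "np" is registered above as an alias of "numpy"
    print(type(numpy_formatter).__name__)  # expected: "NumpyFormatter"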
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
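# A hedged usage sketch for the tool defined above: instantiate it and call it
# with a PIL image plus an English question. The image path is a placeholder
# and the first call downloads the dandelin/vilt-b32-finetuned-vqa weights, so
# treat this as illustrative rather than a test.
from PIL import Image

vqa_tool = ImageQuestionAnsweringTool()
example_image = Image.open("example.png")  # placeholder path, replace with a real image
print(vqa_tool(example_image, "How many animals are in the picture?"))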
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
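# A quick check of the estimator above: for f(x) = x * x on [0, 1] the exact
# integral is 1/3. Because make_points stops at b - h, the last interior point
# can be dropped for some step counts, so expect the estimate to be close to,
# but not exactly, the analytic value.
print(trapezoidal_rule([0.0, 1.0], 100.0), "vs exact", 1 / 3)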
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder lowerCAmelCase__ = 'base_with_context' def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ): _A : Tuple = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) _A : int = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): _A : Tuple = weights[f"layers_{lyr_num}"] _A : Any = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) _A : Optional[int] = ly_weight["attention"] _A : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _A : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _A : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _A : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _A : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) _A : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) _A : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) _A : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) _A : str = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ): _A : Any = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) _A : List[Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): _A : List[str] = weights[f"layers_{lyr_num}"] _A : Any = ly_weight["attention"] _A : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _A : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _A : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _A : Dict = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _A : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) _A : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) _A : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) _A : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) _A : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) _A : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ): _A : Optional[int] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) _A : Any = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) _A : List[str] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ ) _A : Optional[Any] = nn.Parameter( 
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _A : str = weights[f"layers_{lyr_num}"] _A : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) _A : Dict = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) _A : List[Any] = ly_weight["self_attention"] _A : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _A : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _A : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _A : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _A : Union[str, Any] = ly_weight["MultiHeadDotProductAttention_0"] _A : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) _A : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) _A : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) _A : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) _A : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) _A : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) _A : Any = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) _A : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) _A : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) _A : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) _A : Tuple = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) _A : Tuple = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def _UpperCAmelCase (UpperCamelCase__ : List[str] ): _A : Dict = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _A : Dict = jnp.tree_util.tree_map(onp.array , UpperCamelCase__ ) _A : List[str] = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] _A : str = os.path.join(args.checkpoint_path , ".." 
, "config.gin" ) _A : Tuple = inference.parse_training_gin_file(UpperCamelCase__ , UpperCamelCase__ ) _A : int = inference.InferenceModel(args.checkpoint_path , UpperCamelCase__ ) _A : List[str] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) _A : List[str] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) _A : Any = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) _A : Dict = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _A : List[Any] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , UpperCamelCase__ ) _A : List[Any] = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , UpperCamelCase__ ) _A : int = load_decoder(ta_checkpoint["target"]["decoder"] , UpperCamelCase__ ) _A : Tuple = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) _A : List[Any] = SpectrogramDiffusionPipeline( notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=f"{MODEL}/checkpoint_500000", type=str, required=False, help='Path to the original jax model checkpoint.', ) lowerCAmelCase__ = parser.parse_args() main(args)
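# The repeated `.T` in the loaders above reflects a layout difference: Flax
# Dense kernels are stored as (in_features, out_features), while
# torch.nn.Linear keeps its weight as (out_features, in_features). A minimal
# standalone sketch of that single conversion step, with illustrative shapes
# that are not tied to any real checkpoint:
import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.random.randn(16, 32).astype("float32")  # (in, out)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))  # (out, in)
assert tuple(linear.weight.shape) == (32, 16)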
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
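# A minimal round-trip sketch of the behaviour exercised by the tests above:
# build a GenerationConfig, save it to a temporary directory and reload it.
# Only public transformers APIs are used; the parameter values are arbitrary.
import tempfile

from transformers import GenerationConfig

generation_config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    generation_config.save_pretrained(tmp_dir)
    reloaded = GenerationConfig.from_pretrained(tmp_dir)
assert reloaded.temperature == 0.7
assert reloaded.top_k == 50  # unspecified fields keep their defaults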
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> List[Any]: _A : str = 1_0 def _lowerCamelCase ( self) -> Any: _A : Optional[int] = [1, 2, 3, 4] _A : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__lowerCamelCase , self.block_size , 0) , __lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] _A : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__lowerCamelCase , self.block_size , 0) , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] _A : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__lowerCamelCase , self.block_size , 0) , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A : List[Any] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _A , _A : int = process_story(__lowerCamelCase) self.assertEqual(__lowerCamelCase , []) def _lowerCamelCase ( self) -> Optional[int]: _A : List[Any] = "" _A , _A : Optional[int] = process_story(__lowerCamelCase) self.assertEqual(__lowerCamelCase , []) self.assertEqual(__lowerCamelCase , []) def _lowerCamelCase ( self) -> List[Any]: _A : Optional[Any] = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _A , _A : str = process_story(__lowerCamelCase) _A : Any = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = ["It was the best of times."] self.assertEqual(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Dict = torch.tensor([1, 2, 3, 4]) _A : int = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(__lowerCamelCase , 0).numpy() , expected.numpy()) def _lowerCamelCase ( self) -> int: _A : Optional[int] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3]) _A : Union[str, Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(__lowerCamelCase , 2_3).numpy() , expected.numpy()) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[int] = torch.tensor([8, 2, 3, 4, 1, 1, 1]) _A : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(__lowerCamelCase , 1).numpy() , expected.numpy()) def _lowerCamelCase ( self) -> str: _A : Dict = 1_0_1 _A : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]]) _A : Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) _A : str = compute_token_type_ids(__lowerCamelCase , __lowerCamelCase) np.testing.assert_array_equal(__lowerCamelCase , __lowerCamelCase)
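# The first three tests above pin down the contract of `truncate_or_pad`:
# clip a sequence to `block_size`, otherwise right-pad it with the pad token.
# A standalone sketch of that behaviour (not the implementation imported from
# utils_summarization):
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))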
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
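# The pooling step above averages (or takes the max of) non-overlapping windows
# of each feature map. A small standalone numpy sketch of the average-pooling
# case, independent of the class above:
import numpy as np


def average_pool_sketch(feature_map: np.ndarray, size: int) -> np.ndarray:
    h, w = feature_map.shape
    out = np.zeros((h // size, w // size))
    for i in range(0, h - size + 1, size):
        for j in range(0, w - size + 1, size):
            out[i // size, j // size] = feature_map[i : i + size, j : j + size].mean()
    return out


pooled = average_pool_sketch(np.arange(16, dtype=float).reshape(4, 4), 2)
print(pooled)  # [[ 2.5  4.5] [10.5 12.5]]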
# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
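# A small check of the rolling-hash identity the search above relies on: the
# hash of text[i + 1 : i + 1 + m] is derived from the hash of text[i : i + m]
# by removing the leading character and appending the next one. It reuses the
# alphabet_size and modulus constants above; the sample text is arbitrary.
def _hash_sketch(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


sample = "rolling hash demo"
m = 5
power = pow(alphabet_size, m - 1, modulus)
h = _hash_sketch(sample[:m])
for i in range(len(sample) - m):
    h = ((h - ord(sample[i]) * power) * alphabet_size + ord(sample[i + m])) % modulus
    assert h == _hash_sketch(sample[i + 1 : i + 1 + m])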
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
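# A hedged usage sketch for set_partitions above: feed it a tiny nested
# parameter dict whose key paths match the rules, then inspect the resulting
# PartitionSpec tree. Needs jax and flax installed; shapes and values are dummies.
import numpy as np

toy_params = {
    "transformer": {"wte": {"embedding": np.zeros((8, 4))}},
    "attention": {"out_proj": {"kernel": np.zeros((4, 4)), "bias": np.zeros(4)}},
}
toy_specs = set_partitions(toy_params)
print(toy_specs)  # embedding -> P('mp', None), out_proj kernel -> P('mp', None), bias -> None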
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase if requested)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
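# Quick examples for the converter defined above:
print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString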
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} lowerCAmelCase__ = { 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } lowerCAmelCase__ = { 'abeja/gpt-neox-japanese-2.7b': 20_48, } def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ): with open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f: _A : Tuple = json.loads(f.read() ) _A : str = collections.OrderedDict() _A : Union[str, Any] = collections.OrderedDict() _A : Union[str, Any] = collections.OrderedDict() with open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f: _A : Any = f.readlines() _A : List[Any] = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(UpperCamelCase__ ): _A : Optional[Any] = b _A : List[str] = idx for wd in b: _A : Union[str, Any] = idx return vocab, raw_vocab, ids_to_tokens, emoji class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="<|endoftext|>" , __lowerCamelCase="<|endoftext|>" , __lowerCamelCase="<|startoftext|>" , __lowerCamelCase="<|endoftext|>" , __lowerCamelCase=False , **__lowerCamelCase , ) -> str: super().__init__( unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , do_clean_text=__lowerCamelCase , **__lowerCamelCase , ) if not os.path.isfile(__lowerCamelCase): raise ValueError( F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") if not os.path.isfile(__lowerCamelCase): raise ValueError( F"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") _A : Optional[int] = do_clean_text _A , _A , _A , _A : Any = load_vocab_and_emoji(__lowerCamelCase , __lowerCamelCase) _A : Union[str, Any] = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji) @property def _lowerCamelCase ( self) -> Union[str, Any]: # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab) def _lowerCamelCase ( self) -> Union[str, Any]: return dict(self.raw_vocab , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return self.subword_tokenizer.tokenize(__lowerCamelCase , clean=self.do_clean_text) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> List[Any]: return self.subword_tokenizer.convert_id_to_token(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> List[Any]: _A : List[str] = "".join(__lowerCamelCase).strip() return out_string def _lowerCamelCase ( self , __lowerCamelCase) -> List[int]: _A : Optional[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase) + [self.eos_token_id]) if len(__lowerCamelCase) > self.model_max_length: _A : List[Any] = input_ids[-self.model_max_length :] return input_ids def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: _A : Optional[Any] = 0 if os.path.isdir(__lowerCamelCase): _A : Optional[int] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : List[str] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]) else: _A : Union[str, Any] = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) _A : Dict = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." 
" Please check that the vocabulary is not corrupted!") _A : List[Any] = token_index writer.write(",".join(__lowerCamelCase) + "\n") index += 1 with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: json.dump(self.emoji , __lowerCamelCase) return vocab_file, emoji_file class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[Any]: _A : Optional[int] = vocab # same as swe _A : Optional[Any] = ids_to_tokens # same as bpe _A : Tuple = emoji _A : Dict = np.max([len(__lowerCamelCase) for w in self.vocab.keys()]) _A : int = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)") _A : int = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*") _A : str = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}") _A : Any = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*") _A : List[Any] = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*") _A : Optional[Any] = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*") _A : Tuple = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" _A : Dict = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" _A : str = str.maketrans({k: "<BLOCK>" for k in keisen + blocks}) def __len__( self) -> str: return len(self.ids_to_tokens) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Optional[Any] = self.content_repattera.sub("<URL>" , __lowerCamelCase) _A : Dict = self.content_repattera.sub("<EMAIL>" , __lowerCamelCase) _A : Optional[Any] = self.content_repattera.sub("<TEL>" , __lowerCamelCase) _A : Optional[int] = self.content_repattera.sub("<DATE>" , __lowerCamelCase) _A : List[str] = self.content_repattera.sub("<DATE>" , __lowerCamelCase) _A : List[str] = self.content_repattera.sub("<PRICE>" , __lowerCamelCase) _A : int = content.translate(self.content_transa) while "<BLOCK><BLOCK>" in content: _A : List[Any] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>") return content def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=False) -> List[str]: _A : Tuple = text.replace(" " , "<SP>") _A : Tuple = text.replace(" " , "<SP>") _A : List[Any] = text.replace("\r\n" , "<BR>") _A : Any = text.replace("\n" , "<BR>") _A : Optional[Any] = text.replace("\r" , "<BR>") _A : str = text.replace("\t" , "<TAB>") _A : List[Any] = text.replace("—" , "ー") _A : List[Any] = text.replace("−" , "ー") for k, v in self.emoji["emoji"].items(): if k in text: _A : Tuple = text.replace(__lowerCamelCase , __lowerCamelCase) if clean: _A : Union[str, Any] = self.clean_text(__lowerCamelCase) def check_simbol(__lowerCamelCase): _A : Optional[int] = x.encode() if len(__lowerCamelCase) == 1 and len(__lowerCamelCase) == 2: _A : int = (int(e[0]) << 8) + int(e[1]) if ( (c >= 0xc_2a1 and c <= 0xc_2bf) or (c >= 0xc_780 and c <= 0xc_783) or (c >= 0xc_ab9 and c <= 0xc_bbf) or (c >= 0xc_c80 and c <= 0xc_da2) ): return True return False def checkuae(__lowerCamelCase): _A : Optional[Any] = x.encode() if len(__lowerCamelCase) == 1 and len(__lowerCamelCase) == 3: 
_A : Any = (int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2]) if c >= 0xe28_080 and c <= 0xe2b_07f: return True return False _A : Tuple = 0 _A : str = [] while pos < len(__lowerCamelCase): _A : List[str] = min(len(__lowerCamelCase) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 _A : Tuple = [] # (token_id, token, pos) for e in range(__lowerCamelCase , __lowerCamelCase , -1): _A : Tuple = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__lowerCamelCase) > 2: _A : str = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if len(__lowerCamelCase) > 0: # the smallest token_id is adopted _A , _A , _A : Tuple = sorted(__lowerCamelCase , key=lambda __lowerCamelCase: x[0])[0] result.append(__lowerCamelCase) _A : Any = e else: _A : Union[str, Any] = pos + 1 _A : Any = text[pos:end] if check_simbol(__lowerCamelCase): result.append("<KIGOU>") elif checkuae(__lowerCamelCase): result.append("<U2000U2BFF>") else: for i in wd.encode("utf-8"): result.append("<|byte%d|>" % i) _A : Tuple = end return result def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase="\n") -> List[str]: _A : Union[str, Any] = [] _A : int = [] _A : List[str] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2])) else: if len(__lowerCamelCase) > 0: words.append(bytearray(__lowerCamelCase).decode("utf-8" , errors="replace")) _A : str = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word]) elif word == "<SP>": words.append(" ") elif word == "<BR>": words.append(__lowerCamelCase) elif word == "<TAB>": words.append("\t") elif word == "<BLOCK>": words.append("▀") elif word == "<KIGOU>": words.append("ǀ") elif word == "<U2000U2BFF>": words.append("‖") else: words.append(__lowerCamelCase) if len(__lowerCamelCase) > 0: words.append(bytearray(__lowerCamelCase).decode("utf-8" , errors="replace")) _A : Optional[Any] = "".join(__lowerCamelCase) return text
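# A hedged usage sketch for the tokenizer above, via the public checkpoint it
# ships with. Loading it downloads the vocab and emoji files, so this is
# illustrative rather than a test; the sample sentence is arbitrary.
from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("こんにちは、世界。")["input_ids"]
print(ids)
print(tokenizer.decode(ids))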
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
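# A worked example for the greedy routine above: items are taken in order of
# value per unit weight, and only the last pick becomes fractional once the
# remaining capacity runs out. The numbers are the classic textbook instance.
values = [60, 100, 120]
weights = [10, 20, 30]
best, fractions = fractional_knapsack(values, weights, 50)
print(best)       # 240.0
print(fractions)  # [1, 1, 0.6666666666666666]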
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self) -> List[Any]: _A : str = 1 _A : Tuple = 3 _A : List[Any] = (3_2, 3_2) _A : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(__lowerCamelCase) return image @property def _lowerCamelCase ( self) -> Tuple: torch.manual_seed(0) _A : List[str] = UNetaDConditionModel( block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , ) return model @property def _lowerCamelCase ( self) -> Dict: torch.manual_seed(0) _A : Optional[int] = AutoencoderKL( block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowerCamelCase ( self) -> List[Any]: torch.manual_seed(0) _A : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , ) return CLIPTextModel(__lowerCamelCase) def _lowerCamelCase ( self) -> Any: _A : Any = "cpu" # ensure determinism for the device-dependent torch.Generator _A : Tuple = self.dummy_cond_unet_upscale _A : Optional[Any] = DDPMScheduler() _A : List[str] = DDIMScheduler(prediction_type="v_prediction") _A : str = self.dummy_vae _A : Tuple = self.dummy_text_encoder _A : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") _A : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0] _A : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase)).convert("RGB").resize((6_4, 6_4)) # make sure here that pndm scheduler skips prk _A : Dict = StableDiffusionUpscalePipeline( unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=3_5_0 , ) _A : Optional[int] = sd_pipe.to(__lowerCamelCase) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase) _A : List[str] = "A painting of a squirrel eating a burger" _A : Any = torch.Generator(device=__lowerCamelCase).manual_seed(0) _A : Optional[int] = sd_pipe( [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , ) _A : List[str] = output.images _A : str = 
torch.Generator(device=__lowerCamelCase).manual_seed(0) _A : Dict = sd_pipe( [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0] _A : int = image[0, -3:, -3:, -1] _A : int = image_from_tuple[0, -3:, -3:, -1] _A : List[Any] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _A : Union[str, Any] = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _A : Any = self.dummy_cond_unet_upscale _A : Dict = DDPMScheduler() _A : str = DDIMScheduler(prediction_type="v_prediction") _A : List[Any] = self.dummy_vae _A : Union[str, Any] = self.dummy_text_encoder _A : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") _A : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0] _A : List[str] = Image.fromarray(np.uinta(__lowerCamelCase)).convert("RGB").resize((6_4, 6_4)) # make sure here that pndm scheduler skips prk _A : Any = StableDiffusionUpscalePipeline( unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=3_5_0 , ) _A : int = sd_pipe.to(__lowerCamelCase) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase) _A : Dict = "A painting of a squirrel eating a burger" _A : str = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , ) _A : Tuple = output.images assert image.shape[0] == 2 _A : Optional[Any] = torch.Generator(device=__lowerCamelCase).manual_seed(0) _A : str = sd_pipe( [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , ) _A : List[Any] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU") def _lowerCamelCase ( self) -> Union[str, Any]: _A : int = self.dummy_cond_unet_upscale _A : Tuple = DDPMScheduler() _A : int = DDIMScheduler(prediction_type="v_prediction") _A : List[Any] = self.dummy_vae _A : Any = self.dummy_text_encoder _A : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") _A : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0] _A : List[Any] = Image.fromarray(np.uinta(__lowerCamelCase)).convert("RGB").resize((6_4, 6_4)) # put models in fp16, except vae as it overflows in fp16 _A : List[str] = unet.half() _A : List[Any] = text_encoder.half() # make sure here that pndm scheduler skips prk _A : List[str] = StableDiffusionUpscalePipeline( unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=3_5_0 , ) _A : Optional[int] = sd_pipe.to(__lowerCamelCase) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase) _A : Union[str, Any] = "A painting of a squirrel eating a burger" _A : int = torch.manual_seed(0) _A : List[Any] = sd_pipe( [prompt] , image=__lowerCamelCase , 
generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , ).images _A : int = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png") _A : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy") _A : Any = "stabilityai/stable-diffusion-x4-upscaler" _A : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase) pipe.to(__lowerCamelCase) pipe.set_progress_bar_config(disable=__lowerCamelCase) pipe.enable_attention_slicing() _A : Any = "a cat sitting on a park bench" _A : Any = torch.manual_seed(0) _A : int = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , ) _A : int = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image).max() < 1e-3 def _lowerCamelCase ( self) -> int: _A : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png") _A : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy") _A : Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler" _A : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained( __lowerCamelCase , torch_dtype=torch.floataa , ) pipe.to(__lowerCamelCase) pipe.set_progress_bar_config(disable=__lowerCamelCase) pipe.enable_attention_slicing() _A : str = "a cat sitting on a park bench" _A : Dict = torch.manual_seed(0) _A : Optional[int] = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , ) _A : Optional[int] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image).max() < 5e-1 def _lowerCamelCase ( self) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _A : List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png") _A : int = "stabilityai/stable-diffusion-x4-upscaler" _A : Dict = StableDiffusionUpscalePipeline.from_pretrained( __lowerCamelCase , torch_dtype=torch.floataa , ) pipe.to(__lowerCamelCase) pipe.set_progress_bar_config(disable=__lowerCamelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() _A : str = "a cat sitting on a park bench" _A : Union[str, Any] = torch.manual_seed(0) _A : int = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type="np" , ) _A : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 1_0**9
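# A minimal usage sketch distilled from the slow tests above: load the public x4
# upscaler checkpoint and run it on a low-resolution image. The model id, the
# prompt and the prompt/image call signature mirror what the tests exercise; the
# fp16 dtype and the CUDA device are illustrative choices, not requirements.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image


def upscale_example():
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe = pipe.to("cuda")
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    # the x4 upscaler returns an image four times the input resolution
    return pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]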
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None: warnings.warn( "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use BeitImageProcessor instead." , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase)
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowerCAmelCase__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]: _A : int = parent _A : Optional[Any] = batch_size _A : str = image_size _A : Tuple = patch_size _A : Tuple = num_channels _A : Optional[int] = embed_dim _A : Dict = depths _A : Any = num_heads _A : Any = window_size _A : int = mlp_ratio _A : Any = qkv_bias _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Dict = drop_path_rate _A : List[Any] = hidden_act _A : Any = use_absolute_embeddings _A : Optional[int] = patch_norm _A : Tuple = layer_norm_eps _A : List[str] = initializer_range _A : Optional[int] = is_training _A : Optional[Any] = scope _A : Optional[int] = use_labels _A : Dict = type_sequence_label_size _A : str = encoder_stride _A : Optional[int] = out_features _A : Optional[int] = out_indices def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _A : Optional[Any] = None if self.use_labels: _A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _A : Optional[int] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]: _A : Dict = MaskFormerSwinModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : int = model(__lowerCamelCase) _A : Any = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict: _A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : Dict = model(__lowerCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4]) # verify ValueError with self.parent.assertRaises(__lowerCamelCase): _A : Union[str, Any] = ["stem"] _A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : Any = self.prepare_config_and_inputs() _A , _A , _A : List[Any] = config_and_inputs _A : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def _lowerCamelCase ( self) -> str: _A : Union[str, Any] = MaskFormerSwinModelTester(self) _A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self) -> str: return def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase) @unittest.skip("Swin does not use inputs_embeds") def _lowerCamelCase ( self) -> str: pass @unittest.skip("Swin does not support feedforward chunking") def _lowerCamelCase ( self) -> List[Any]: pass def _lowerCamelCase ( self) -> Optional[int]: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Union[str, Any] = model_class(__lowerCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _A : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear)) def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : int = model_class(__lowerCamelCase) _A : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : int = [*signature.parameters.keys()] _A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def _lowerCamelCase ( self) -> Tuple: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Any = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() with torch.no_grad(): _A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)) _A : Tuple = outputs.hidden_states _A : Any = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase) # Swin has a different seq_length _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self) -> Dict: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Optional[int] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Optional[int] = 3 _A : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Union[str, Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def _lowerCamelCase ( self) -> List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> List[str]: 
pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCamelCase): _A : Optional[int] = 0 return t def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}): with torch.no_grad(): _A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase) _A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple() def recursive_check(__lowerCamelCase , __lowerCamelCase): if isinstance(__lowerCamelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase): recursive_check(__lowerCamelCase , __lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__lowerCamelCase , __lowerCamelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=( "Tuple and dict output are not equal. Difference:" F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has" F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}." ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase) for model_class in self.all_model_classes: _A : List[Any] = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) _A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) @require_torch class lowerCAmelCase__ ( unittest.TestCase , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = MaskFormerSwinModelTester(self) def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _A : Optional[Any] = backbone_class(__lowerCamelCase) backbone.to(__lowerCamelCase) backbone.eval() _A : List[Any] = 
backbone(**__lowerCamelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True _A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A : List[str] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: _A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase) self.assertIsNotNone(outputs.attentions)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['ConvNextFeatureExtractor'] lowerCAmelCase__ = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
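# The module above follows the transformers lazy-import pattern: public names are
# declared in _import_structure and only resolved on first access, so importing
# the package does not pull in torch, TensorFlow or vision dependencies unless
# they are actually used. A generic sketch of the same idea with module-level
# __getattr__ (PEP 562); the mapping below is illustrative and independent of the
# internal _LazyModule helper:
import importlib

_lazy_names = {"ConvNextModel": ".modeling_convnext"}  # exported name -> submodule


def __getattr__(name):
    if name in _lazy_names:
        submodule = importlib.import_module(_lazy_names[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")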
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): _A : List[Any] = UniSpeechSatForSequenceClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) _A : Optional[Any] = downstream_dict["projector.weight"] _A : List[Any] = downstream_dict["projector.bias"] _A : int = downstream_dict["model.post_net.linear.weight"] _A : Optional[int] = downstream_dict["model.post_net.linear.bias"] return model def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ): _A : Tuple = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) _A : Optional[Any] = downstream_dict["model.linear.weight"] _A : Optional[int] = downstream_dict["model.linear.bias"] return model def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ): _A : List[Any] = UniSpeechSatForXVector.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) _A : str = downstream_dict["connector.weight"] _A : Optional[int] = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _A : Union[str, Any] = downstream_dict[ f"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] _A : Tuple = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"] _A : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] _A : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] _A : str = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] _A : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] _A : int = downstream_dict["objective.W"] return model @torch.no_grad() def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): _A : List[Any] = torch.load(UpperCamelCase__ , map_location="cpu" ) _A : List[str] = checkpoint["Downstream"] _A : Union[str, Any] = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ ) _A : int = WavaVecaFeatureExtractor.from_pretrained( UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , do_normalize=UpperCamelCase__ ) _A : List[Any] = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): _A : Union[str, Any] = convert_classification(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) elif arch.endswith("ForAudioFrameClassification" ): _A : List[Any] = convert_diarization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) elif arch.endswith("ForXVector" ): _A : int = convert_xvector(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: _A : Optional[int] = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(UpperCamelCase__ ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' 
) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') lowerCAmelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
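# A readable restatement of the `preprocess` helper at the top of this file, with
# descriptive names, to make the convention explicit: the PIL image is snapped
# down to a multiple of 32 on each side, converted to a float32 NCHW tensor and
# rescaled from [0, 255] to [-1, 1] before being concatenated with the latents.
# The names here are illustrative and not part of the pipeline API.
import numpy as np
import PIL.Image
import torch


def preprocess_sketch(image: PIL.Image.Image) -> torch.Tensor:
    width, height = (side - side % 32 for side in image.size)  # multiples of 32
    image = image.resize((width, height), resample=PIL.Image.LANCZOS)
    array = np.array(image).astype(np.float32) / 255.0  # HWC in [0, 1]
    array = array[None].transpose(0, 3, 1, 2)  # NHWC -> NCHW
    return 2.0 * torch.from_numpy(array) - 1.0  # scale to [-1, 1]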
from __future__ import annotations def _UpperCAmelCase (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[list[str]] , UpperCamelCase__ : int , ): _A : Optional[int] = len(UpperCamelCase__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(UpperCamelCase__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , UpperCamelCase__ , UpperCamelCase__ , ) def _UpperCAmelCase (UpperCamelCase__ : int ): _A : list[list[str]] = [] depth_first_search([] , [] , [] , UpperCamelCase__ , UpperCamelCase__ ) # Print all the boards for board in boards: for column in board: print(UpperCamelCase__ ) print("" ) print(len(UpperCamelCase__ ) , "solutions were found." ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
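# A compact, readable sketch of the same idea as `depth_first_search` above: a
# queen placed at (row, col) collides with an earlier queen if they share a
# column, a "\" diagonal (row - col) or a "/" diagonal (row + col). This version
# only counts solutions; the names are illustrative.
def count_n_queens(n: int) -> int:
    def place(row, cols, right_diagonals, left_diagonals):
        if row == n:
            return 1  # every row holds a queen, so this is one valid board
        total = 0
        for col in range(n):
            if col in cols or (row - col) in right_diagonals or (row + col) in left_diagonals:
                continue  # collision with a queen already on the board
            total += place(
                row + 1, cols | {col}, right_diagonals | {row - col}, left_diagonals | {row + col}
            )
        return total

    return place(0, set(), set(), set())


# count_n_queens(4) == 2 and count_n_queens(8) == 92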
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
def _UpperCAmelCase (UpperCamelCase__ : int ): if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise TypeError("Input value must be a 'int' type" ) return bin(UpperCamelCase__ ).count("1" ) if __name__ == "__main__": import doctest doctest.testmod()
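# The helper above counts set bits with bin(n).count("1"); because the argument
# names were mangled, here is a readable restatement with the validation the
# checks above appear to intend (illustrative names only):
def count_set_bits(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")


# count_set_bits(25) == 3, since 25 is 0b11001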
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
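# A small usage sketch for the fast MBart tokenizer defined above. It builds
# sequences as "tokens ... </s> <lang_code>" (empty prefix, eos plus language
# code suffix), which is what set_src_lang_special_tokens installs. The
# checkpoint name and the src_lang/tgt_lang keywords follow the pretrained maps
# declared at the top of this file; the sentence is illustrative.
from transformers import MBartTokenizerFast


def mbart_tokenize_example():
    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    enc = tok("The dog is cute.", return_tensors="pt")
    # the last two ids are </s> followed by the en_XX language code
    return enc["input_ids"]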
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
    _A : List[Any] = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )


def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
    _A , _A : List[str] = emb.weight.shape
    _A : Union[str, Any] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
    _A : Dict = emb.weight.data
    return lin_layer


def _UpperCAmelCase (UpperCamelCase__ : int ):
    _A : Tuple = torch.load(UpperCamelCase__ , map_location="cpu" )
    _A : int = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    _A : int = mam_aaa["model"]
    remove_ignore_keys_(UpperCamelCase__ )
    _A : Union[str, Any] = state_dict["encoder.embed_tokens.weight"].shape[0]
    _A : List[str] = MaMaaaConfig(
        vocab_size=UpperCamelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    _A : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
    _A : int = MaMaaaForConditionalGeneration(UpperCamelCase__ )
    model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
    _A : Any = make_linear_from_emb(model.model.shared )
    return model


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    lowerCAmelCase__ = parser.parse_args()
    lowerCAmelCase__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
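# A readable sketch of the make_linear_from_emb idea used above: the LM head is
# an nn.Linear whose weight tensor is shared with the token embedding, so the
# output projection stays tied to the embeddings instead of being copied. The
# names are illustrative; the conversion script does the same thing inline.
from torch import nn


def linear_tied_to_embedding(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin = nn.Linear(emb_size, vocab_size, bias=False)
    lin.weight.data = emb.weight.data  # share storage: updating one updates the other
    return lin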
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase__ = '</w>' lowerCAmelCase__ = '@@ ' def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : Optional[int] = set() _A : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A : List[Any] = char return pairs # Speech2Text2 has no max input length lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) _A : Dict = do_lower_case with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[int] = json.load(__lowerCamelCase) _A : Optional[Any] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") _A : Optional[Any] = None _A : Tuple = None else: with open(__lowerCamelCase , encoding="utf-8") as merges_handle: _A : Optional[int] = merges_handle.read().split("\n")[:-1] _A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges] _A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase)))) _A : List[Any] = {} @property def _lowerCamelCase ( self) -> int: return len(self.decoder) def _lowerCamelCase ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A : int = get_pairs(__lowerCamelCase) if not pairs: return token while True: _A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf"))) if bigram not in self.bpe_ranks: break _A , _A : Optional[int] = bigram _A : int = [] _A : str = 0 while i < len(__lowerCamelCase): try: _A : str = word.index(__lowerCamelCase , __lowerCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _A : str = j if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _A : List[str] = tuple(__lowerCamelCase) _A : List[str] = new_word if len(__lowerCamelCase) == 1: break else: _A : List[Any] = get_pairs(__lowerCamelCase) _A : Tuple = " ".join(__lowerCamelCase) if word == "\n " + BPE_TOKEN_MERGES: _A : List[str] = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase): _A : int = word.replace(__lowerCamelCase , "") _A : int = word.replace(" " , __lowerCamelCase) _A : Union[str, Any] = word return word def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." 
"Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: _A : List[Any] = text.lower() _A : Optional[int] = text.split() _A : List[str] = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" "))) return split_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token) return result def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : str = " ".join(__lowerCamelCase) # make sure @@ tokens are concatenated _A : int = "".join(string.split(__lowerCamelCase)) return string def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") _A : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!") _A : Optional[int] = token_index writer.write(" ".join(__lowerCamelCase) + "\n") index += 1 return (vocab_file, merges_file)
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : int ): while a != 0: _A , _A : Optional[Any] = b % a, a return b def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : int ): if gcd(UpperCamelCase__ , UpperCamelCase__ ) != 1: _A : int = f"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(UpperCamelCase__ ) _A , _A , _A : Dict = 1, 0, a _A , _A , _A : Any = 0, 1, m while va != 0: _A : List[Any] = ua // va _A , _A , _A , _A , _A , _A : Union[str, Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
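# The two helpers above are the classic Euclidean gcd and the iterative extended
# Euclidean modular inverse, but the mangled names obscure them. A readable
# sketch of the same algorithms (illustrative names):
def gcd(a, b):
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a, m):
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = u1 - q * v1, u2 - q * v2, u3 - q * v3, v1, v2, v3
    return u1 % m


# Example: mod_inverse(3, 11) == 4, since (3 * 4) % 11 == 1.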
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "vit_mae" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int: super().__init__(**__lowerCamelCase) _A : int = hidden_size _A : List[str] = num_hidden_layers _A : List[Any] = num_attention_heads _A : Optional[Any] = intermediate_size _A : Optional[int] = hidden_act _A : List[Any] = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : Union[str, Any] = initializer_range _A : str = layer_norm_eps _A : Any = image_size _A : int = patch_size _A : int = num_channels _A : Dict = qkv_bias _A : Tuple = decoder_num_attention_heads _A : Tuple = decoder_hidden_size _A : List[str] = decoder_num_hidden_layers _A : Optional[Any] = decoder_intermediate_size _A : List[str] = mask_ratio _A : Union[str, Any] = norm_pix_loss
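# A brief usage sketch for the configuration above: build the config with its
# defaults (0.75 mask ratio, norm_pix_loss disabled) and instantiate a randomly
# initialised model from it. The class names follow the public transformers API
# for ViT MAE; use from_pretrained() instead to load a released checkpoint.
from transformers import ViTMAEConfig, ViTMAEModel


def build_vit_mae():
    config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=False)
    return ViTMAEModel(config)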
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @slow def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-base") _A : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]]) # The dog is cute and lives in the garden house _A : List[Any] = torch.Size((1, 1_2, 7_6_8)) # batch_size, sequence_length, embedding_vector_dim _A : List[Any] = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _A : List[Any] = model(__lowerCamelCase)["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3)) @slow def _lowerCamelCase ( self) -> List[Any]: _A : Dict = XLMRobertaModel.from_pretrained("xlm-roberta-large") _A : int = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]]) # The dog is cute and lives in the garden house _A : List[Any] = torch.Size((1, 1_2, 1_0_2_4)) # batch_size, sequence_length, embedding_vector_dim _A : str = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _A : int = model(__lowerCamelCase)["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3))
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations def _UpperCAmelCase (UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ): _A , _A : Union[str, Any] = position _A : str = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A : Dict = [] for position in positions: _A , _A : List[str] = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(UpperCamelCase__ ) return permissible_positions def _UpperCAmelCase (UpperCamelCase__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _UpperCAmelCase (UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ): if is_complete(UpperCamelCase__ ): return True for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ): _A , _A : Union[str, Any] = position if board[y][x] == 0: _A : Optional[Any] = curr + 1 if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ): return True _A : Union[str, Any] = 0 return False def _UpperCAmelCase (UpperCamelCase__ : int ): _A : Tuple = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )] for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): _A : Union[str, Any] = 1 if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ): return board _A : Any = 0 _A : Union[str, Any] = f"Open Knight Tour cannot be performed on a board of size {n}" raise ValueError(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
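# Illustrative sketch: a compact, self-contained restatement of the same open knight's
# tour backtracking idea with ordinary names (the functions above are defined under
# generated identifiers, so the names below are assumptions for readability only).
def _knight_moves(y, x, n):
    deltas = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    return [(y + dy, x + dx) for dy, dx in deltas if 0 <= y + dy < n and 0 <= x + dx < n]

def _tour(board, pos, step, n):
    if step == n * n:  # every square visited exactly once
        return True
    for ny, nx in _knight_moves(*pos, n):
        if board[ny][nx] == 0:
            board[ny][nx] = step + 1
            if _tour(board, (ny, nx), step + 1, n):
                return True
            board[ny][nx] = 0  # backtrack
    return False

_n = 5
_board = [[0] * _n for _ in range(_n)]
_board[0][0] = 1
if _tour(_board, (0, 0), 1, _n):
    for _row in _board:
        print(_row)  # visit order of an open 5x5 knight's tour starting at (0, 0)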
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
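# Illustrative sketch of the `--variations` expansion done in `main()`: each dimension is
# split on '|', a cartesian product is taken, and whitespace is normalised (an empty entry
# is a valid variation). The two dimensions below are the example from the header comment.
import itertools
import re

_dims = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
_split = [list(map(str.strip, re.split(r"\|", d))) for d in _dims]
_variations = list(map(str.strip, map(" ".join, itertools.product(*_split))))
print(_variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']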
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'vocab.json'} lowerCAmelCase__ = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } lowerCAmelCase__ = {'mgp-str': 27} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __lowerCamelCase , __lowerCamelCase="[GO]" , __lowerCamelCase="[GO]" , __lowerCamelCase="[s]" , __lowerCamelCase="[GO]" , **__lowerCamelCase) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[Any] = json.load(__lowerCamelCase) _A : Tuple = {v: k for k, v in self.vocab.items()} @property def _lowerCamelCase ( self) -> Union[str, Any]: return len(self.vocab) def _lowerCamelCase ( self) -> int: return dict(self.vocab , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> List[Any]: _A : int = [] for s in text: char_tokens.extend(__lowerCamelCase) return char_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> List[Any]: return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return self.decoder.get(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error("Vocabulary path ({}) should be a directory".format(__lowerCamelCase)) return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") return (vocab_file,)
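# Usage sketch: the tokenizer above works character by character against a plain JSON
# vocabulary, with "[GO]" doubling as the unknown token. A tiny in-memory imitation (the
# vocabulary contents below are assumptions, not the real MGP-STR vocab):
_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
_decoder = {v: k for k, v in _vocab.items()}

_text = "abca"
_tokens = list(_text)  # per-character tokenization
_ids = [_vocab.get(t, _vocab["[GO]"]) for t in _tokens]  # unknown characters fall back to "[GO]"
print(_tokens, _ids)  # ['a', 'b', 'c', 'a'] [2, 3, 4, 2]
print([_decoder[i] for i in _ids])  # ['a', 'b', 'c', 'a']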
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self) -> int: if self.train_file is not None: _A : Optional[int] = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: _A : Dict = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , __lowerCamelCase) -> str: _A : List[Any] = "label" if "label" in features[0].keys() else "labels" _A : Any = [feature.pop(__lowerCamelCase) for feature in features] _A : Optional[int] = len(__lowerCamelCase) _A : int = len(features[0]["input_ids"]) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features ] _A : str = list(chain(*__lowerCamelCase)) _A : Tuple = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()} # Add back labels _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa) return batch def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : int = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _A : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : List[str] = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Tuple = data_args.validation_file _A : Union[str, Any] = data_args.train_file.split("." )[-1] _A : List[str] = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. _A : Union[str, Any] = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : str = [f"ending{i}" for i in range(4 )] _A : Union[str, Any] = "sent1" _A : str = "sent2" if data_args.max_seq_length is None: _A : Any = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." 
) _A : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _A : int = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCamelCase__ : List[Any] ): _A : List[Any] = [[context] * 4 for context in examples[context_name]] _A : Any = examples[question_header_name] _A : Union[str, Any] = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out _A : Dict = list(chain(*UpperCamelCase__ ) ) _A : List[Any] = list(chain(*UpperCamelCase__ ) ) # Tokenize _A : str = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _A : Optional[int] = raw_datasets["train"] if data_args.max_train_samples is not None: _A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) _A : Any = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _A : Optional[int] = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _A : Optional[int] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) _A : Dict = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _A : List[str] = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator _A : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ : Tuple ): _A , _A : List[str] = eval_predictions _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: _A : Any = None if training_args.resume_from_checkpoint is not None: _A : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : int = last_checkpoint _A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Tuple = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) _A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) _A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) _A : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
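# Illustrative sketch of the flatten / un-flatten step used in `preprocess_function` and in
# the multiple-choice data collator above: four candidate endings per example are flattened
# for tokenization and regrouped afterwards. The toy strings stand in for real SWAG rows.
from itertools import chain

_contexts = [["ctx0"] * 4, ["ctx1"] * 4]  # each context repeated once per ending
_endings = [["e0a", "e0b", "e0c", "e0d"], ["e1a", "e1b", "e1c", "e1d"]]

_flat_ctx = list(chain(*_contexts))  # 8 strings
_flat_end = list(chain(*_endings))   # 8 strings
_pairs = list(zip(_flat_ctx, _flat_end))  # what the tokenizer would receive
_regrouped = [_pairs[i : i + 4] for i in range(0, len(_pairs), 4)]  # back to 4 choices per example
print(len(_pairs), len(_regrouped), len(_regrouped[0]))  # 8 2 4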
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None: warnings.warn( "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use MobileViTImageProcessor instead." , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase)
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
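# Illustrative sketch of the metric extraction above: `TrainingJobAnalytics(...).dataframe()`
# yields rows of (metric_name, value), and the test filters one metric at a time. The
# numbers below are made up.
import pandas as pd

_df = pd.DataFrame(
    {"metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"], "value": [0.71, 0.55, 0.73]}
)
_eval_accuracy = list(_df[_df.metric_name == "eval_accuracy"]["value"])
print(_eval_accuracy)  # [0.71, 0.73]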
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) lowerCAmelCase__ = logging.getLogger(__name__) def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : str ): _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return np.sum(outputs == labels ) def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): with open(UpperCamelCase__ , encoding="utf_8" ) as f: _A : str = csv.reader(UpperCamelCase__ ) _A : List[Any] = [] next(UpperCamelCase__ ) # skip the first line for line in tqdm(UpperCamelCase__ ): output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _UpperCAmelCase (UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): _A : int = [] for dataset in encoded_datasets: _A : Tuple = len(UpperCamelCase__ ) _A : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) _A : Optional[int] = np.zeros((n_batch, 2) , dtype=np.intaa ) _A : Any = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) _A : List[Any] = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(UpperCamelCase__ ): _A : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _A : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _A : int = with_conta _A : str = with_conta _A : int = len(UpperCamelCase__ ) - 1 _A : str = len(UpperCamelCase__ ) - 1 _A : List[Any] = with_conta _A : str = with_conta _A : Union[str, Any] = mc_label _A : int = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(UpperCamelCase__ ) for t in all_inputs ) ) return tensor_datasets def _UpperCAmelCase (): _A : Optional[int] = argparse.ArgumentParser() parser.add_argument("--model_name" , type=UpperCamelCase__ , default="openai-gpt" , help="pretrained model name" ) parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." ) parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." ) parser.add_argument( "--output_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory where the model predictions and checkpoints will be written." , ) parser.add_argument("--train_dataset" , type=UpperCamelCase__ , default="" ) parser.add_argument("--eval_dataset" , type=UpperCamelCase__ , default="" ) parser.add_argument("--seed" , type=UpperCamelCase__ , default=42 ) parser.add_argument("--num_train_epochs" , type=UpperCamelCase__ , default=3 ) parser.add_argument("--train_batch_size" , type=UpperCamelCase__ , default=8 ) parser.add_argument("--eval_batch_size" , type=UpperCamelCase__ , default=16 ) parser.add_argument("--adam_epsilon" , default=1E-8 , type=UpperCamelCase__ , help="Epsilon for Adam optimizer." 
) parser.add_argument("--max_grad_norm" , type=UpperCamelCase__ , default=1 ) parser.add_argument( "--max_steps" , default=-1 , type=UpperCamelCase__ , help=( "If > 0: set total number of training steps to perform. Override num_train_epochs." ) , ) parser.add_argument( "--gradient_accumulation_steps" , type=UpperCamelCase__ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , ) parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=6.25E-5 ) parser.add_argument("--warmup_steps" , default=0 , type=UpperCamelCase__ , help="Linear warmup over warmup_steps." ) parser.add_argument("--lr_schedule" , type=UpperCamelCase__ , default="warmup_linear" ) parser.add_argument("--weight_decay" , type=UpperCamelCase__ , default=0.01 ) parser.add_argument("--lm_coef" , type=UpperCamelCase__ , default=0.9 ) parser.add_argument("--n_valid" , type=UpperCamelCase__ , default=374 ) parser.add_argument("--server_ip" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." ) _A : Optional[int] = parser.parse_args() print(UpperCamelCase__ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) _A : List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _A : Optional[int] = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(UpperCamelCase__ , UpperCamelCase__ ) ) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True." ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset _A : Tuple = ["_start_", "_delimiter_", "_classify_"] _A : Tuple = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(UpperCamelCase__ ) _A : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) _A : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(UpperCamelCase__ ) ) model.to(UpperCamelCase__ ) # Load and encode the datasets def tokenize_and_encode(UpperCamelCase__ : int ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase__ ) ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): return obj return [tokenize_and_encode(UpperCamelCase__ ) for o in obj] logger.info("Encoding dataset..." 
) _A : Any = load_rocstories_dataset(args.train_dataset ) _A : Union[str, Any] = load_rocstories_dataset(args.eval_dataset ) _A : List[str] = (train_dataset, eval_dataset) _A : int = tokenize_and_encode(UpperCamelCase__ ) # Compute the max input length for the Transformer _A : str = model.config.n_positions // 2 - 2 _A : Any = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) _A : Dict = min(UpperCamelCase__ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders _A : Dict = pre_process_datasets(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) _A , _A : List[Any] = tensor_datasets[0], tensor_datasets[1] _A : Dict = TensorDataset(*UpperCamelCase__ ) _A : Optional[int] = RandomSampler(UpperCamelCase__ ) _A : int = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.train_batch_size ) _A : int = TensorDataset(*UpperCamelCase__ ) _A : List[Any] = SequentialSampler(UpperCamelCase__ ) _A : Optional[Any] = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: _A : Tuple = args.max_steps _A : Tuple = args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1 else: _A : Any = len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs _A : List[str] = list(model.named_parameters() ) _A : List[Any] = ["bias", "LayerNorm.bias", "LayerNorm.weight"] _A : Dict = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], "weight_decay": args.weight_decay, }, {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0}, ] _A : Union[str, Any] = AdamW(UpperCamelCase__ , lr=args.learning_rate , eps=args.adam_epsilon ) _A : List[Any] = get_linear_schedule_with_warmup( UpperCamelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase__ ) if args.do_train: _A , _A , _A : Union[str, Any] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ): _A : Tuple = 0 _A : str = 0 _A : Dict = tqdm(UpperCamelCase__ , desc="Training" ) for step, batch in enumerate(UpperCamelCase__ ): _A : List[Any] = tuple(t.to(UpperCamelCase__ ) for t in batch ) _A , _A , _A , _A : List[str] = batch _A : int = model(UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ ) _A : str = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() _A : Any = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 _A : Optional[Any] = "Training loss: {:.2e} lr: {:.2e}".format(UpperCamelCase__ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer _A : List[Any] = model.module if hasattr(UpperCamelCase__ , "module" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` _A : List[str] = os.path.join(args.output_dir , UpperCamelCase__ ) _A : Dict = os.path.join(args.output_dir , UpperCamelCase__ ) torch.save(model_to_save.state_dict() , UpperCamelCase__ ) model_to_save.config.to_json_file(UpperCamelCase__ ) tokenizer.save_vocabulary(args.output_dir ) # Load a 
trained model and vocabulary that you have fine-tuned _A : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) _A : int = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(UpperCamelCase__ ) if args.do_eval: model.eval() _A , _A : str = 0, 0 _A , _A : Any = 0, 0 for batch in tqdm(UpperCamelCase__ , desc="Evaluating" ): _A : Union[str, Any] = tuple(t.to(UpperCamelCase__ ) for t in batch ) _A , _A , _A , _A : Any = batch with torch.no_grad(): _A , _A , _A , _A : Tuple = model( UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ ) _A : List[Any] = mc_logits.detach().cpu().numpy() _A : Optional[Any] = mc_labels.to("cpu" ).numpy() _A : Union[str, Any] = accuracy(UpperCamelCase__ , UpperCamelCase__ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 _A : Any = eval_loss / nb_eval_steps _A : int = eval_accuracy / nb_eval_examples _A : Dict = tr_loss / nb_tr_steps if args.do_train else None _A : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} _A : Union[str, Any] = os.path.join(args.output_dir , "eval_results.txt" ) with open(UpperCamelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key in sorted(result.keys() ): logger.info(" %s = %s" , UpperCamelCase__ , str(result[key] ) ) writer.write("%s = %s\n" % (key, str(result[key] )) ) if __name__ == "__main__": main()
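# Illustrative sketch of how `pre_process_datasets` packs one RocStories example: the story
# and each candidate continuation are wrapped with the three added special tokens, giving
# two candidate sequences per example. All token ids below are made-up assumptions.
_start, _delim, _clf = 40478, 40479, 40480  # assumed ids of _start_, _delimiter_, _classify_
_story, _cont_a, _cont_b = [11, 12, 13], [21, 22], [31, 32, 33]

_with_a = [_start] + _story + [_delim] + _cont_a + [_clf]
_with_b = [_start] + _story + [_delim] + _cont_b + [_clf]
_mc_token_ids = (len(_with_a) - 1, len(_with_b) - 1)  # position of the classification token per candidate
print(_with_a)          # [40478, 11, 12, 13, 40479, 21, 22, 40480]
print(_with_b)          # [40478, 11, 12, 13, 40479, 31, 32, 33, 40480]
print(_mc_token_ids)    # (7, 8)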
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
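# Illustrative sketch of the query-padding step in `__call__` above: every sample in a
# nested list of text queries is padded with " " up to the largest number of queries in
# the batch before tokenization.
_text = [["cat", "dog"], ["remote control"]]
_max_num_queries = max(len(t) for t in _text)
_padded = [t + [" "] * (_max_num_queries - len(t)) for t in _text]
print(_padded)  # [['cat', 'dog'], ['remote control', ' ']]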
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "yolos" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=[5_1_2, 8_6_4] , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_0_0 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=1 , __lowerCamelCase=5 , __lowerCamelCase=2 , __lowerCamelCase=5 , __lowerCamelCase=2 , __lowerCamelCase=0.1 , **__lowerCamelCase , ) -> Optional[Any]: super().__init__(**__lowerCamelCase) _A : Optional[Any] = hidden_size _A : Union[str, Any] = num_hidden_layers _A : str = num_attention_heads _A : int = intermediate_size _A : Optional[int] = hidden_act _A : Optional[Any] = hidden_dropout_prob _A : Union[str, Any] = attention_probs_dropout_prob _A : Optional[Any] = initializer_range _A : Dict = layer_norm_eps _A : Any = image_size _A : str = patch_size _A : Optional[Any] = num_channels _A : str = qkv_bias _A : str = num_detection_tokens _A : Tuple = use_mid_position_embeddings _A : List[str] = auxiliary_loss # Hungarian matcher _A : Dict = class_cost _A : int = bbox_cost _A : Any = giou_cost # Loss coefficients _A : Optional[Any] = bbox_loss_coefficient _A : Dict = giou_loss_coefficient _A : Optional[Any] = eos_coefficient class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = version.parse("1.11") @property def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def _lowerCamelCase ( self) -> float: return 1e-4 @property def _lowerCamelCase ( self) -> int: return 1_2
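# Usage sketch: the configuration class above mirrors `transformers.YolosConfig`, so a
# minimal instantiation would look like the following. The overridden values are
# illustrative assumptions.
from transformers import YolosConfig

yolos_config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
print(yolos_config.num_detection_tokens, yolos_config.image_size)  # 100 [512, 864]
print(yolos_config.class_cost, yolos_config.bbox_cost, yolos_config.giou_cost)  # Hungarian matcher costs: 1 5 2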
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): _A : Optional[int] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Optional[int] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase) _A : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Dict: _A : int = "sgugger/tiny-distilbert-classification" _A : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = "sshleifer/tiny-gpt2" _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase) _A : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def _lowerCamelCase ( self) -> int: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Any = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Any: _A : Union[str, Any] = "sshleifer/tiny-gpt2" _A : Any = AutoConfig.from_pretrained(__lowerCamelCase) # set architectures equal to `None` _A : Dict = None _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : List[Any] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : List[Any] = PyTorchBenchmark(__lowerCamelCase) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> str: _A : List[str] = "sshleifer/tiny-gpt2" _A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : Tuple = "sshleifer/tinier_bart" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> str: _A : List[Any] = "sshleifer/tiny-gpt2" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> int: _A : int = "sshleifer/tinier_bart" _A : str = AutoConfig.from_pretrained(__lowerCamelCase) _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> Dict: _A : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , 
"inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists()) def _lowerCamelCase ( self) -> int: _A : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase): self.assertTrue(hasattr(__lowerCamelCase , "sequential")) self.assertTrue(hasattr(__lowerCamelCase , "cumulative")) self.assertTrue(hasattr(__lowerCamelCase , "current")) self.assertTrue(hasattr(__lowerCamelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : Optional[int] = PyTorchBenchmark(__lowerCamelCase) _A : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "upernet" def __init__( self , __lowerCamelCase=None , __lowerCamelCase=5_1_2 , __lowerCamelCase=0.0_2 , __lowerCamelCase=[1, 2, 3, 6] , __lowerCamelCase=True , __lowerCamelCase=0.4 , __lowerCamelCase=3_8_4 , __lowerCamelCase=2_5_6 , __lowerCamelCase=1 , __lowerCamelCase=False , __lowerCamelCase=2_5_5 , **__lowerCamelCase , ) -> Optional[Any]: super().__init__(**__lowerCamelCase) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") _A : List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"]) elif isinstance(__lowerCamelCase , __lowerCamelCase): _A : Union[str, Any] = backbone_config.get("model_type") _A : Dict = CONFIG_MAPPING[backbone_model_type] _A : Union[str, Any] = config_class.from_dict(__lowerCamelCase) _A : Optional[int] = backbone_config _A : str = hidden_size _A : int = initializer_range _A : Optional[int] = pool_scales _A : Union[str, Any] = use_auxiliary_head _A : Any = auxiliary_loss_weight _A : Union[str, Any] = auxiliary_in_channels _A : int = auxiliary_channels _A : List[str] = auxiliary_num_convs _A : Dict = auxiliary_concat_input _A : List[Any] = loss_ignore_index def _lowerCamelCase ( self) -> List[str]: _A : int = copy.deepcopy(self.__dict__) _A : List[Any] = self.backbone_config.to_dict() _A : List[Any] = self.__class__.model_type return output
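# Minimal usage sketch, assuming the class above corresponds to UperNetConfig in
# upstream transformers: a default config falls back to a ResNet backbone and can be
# round-tripped through to_dict() as implemented above.
if __name__ == "__main__":
    from transformers import UperNetConfig

    config = UperNetConfig()
    as_dict = config.to_dict()
    print(as_dict["model_type"], as_dict["backbone_config"]["model_type"])  # upernet resnet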
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCAmelCase__ = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = NllbTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it _A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token _A : Optional[int] = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) _A : int = vocab_file _A : Optional[Any] = False if not self.vocab_file else True _A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "eng_Latn" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : List[Any] = src_lang _A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Tuple = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , 
__lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding: _A : Tuple = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> str: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : List[str] = [] _A : Dict = [self.eos_token_id, self.cur_lang_code] else: _A : Tuple = [self.cur_lang_code] _A : Optional[Any] = [self.eos_token_id] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : Tuple = [] _A : Any = [self.eos_token_id, self.cur_lang_code] else: _A : Union[str, Any] = [self.cur_lang_code] _A : str = [self.eos_token_id] _A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : Dict = self.convert_ids_to_tokens(self.suffix_tokens) _A : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
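# Minimal usage sketch: with the upstream checkpoint below, the fast NLLB tokenizer
# wraps the input with the source language code and EOS exactly as
# set_src_lang_special_tokens above lays out. The checkpoint name is illustrative.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    encoded = tokenizer("Hello world")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))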
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=0.9_99 , UpperCamelCase__ : List[Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCamelCase__ : Tuple ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCamelCase__ : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" ) _A : Optional[int] = [] for i in range(UpperCamelCase__ ): _A : int = i / num_diffusion_timesteps _A : str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) ) return torch.tensor(UpperCamelCase__ , dtype=torch.floataa ) class lowerCAmelCase__ ( a , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = [e.name for e in KarrasDiffusionSchedulers] __SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self , __lowerCamelCase = 1_0_0_0 , __lowerCamelCase = 0.0_0_0_8_5 , __lowerCamelCase = 0.0_1_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "epsilon" , __lowerCamelCase = "linspace" , __lowerCamelCase = 0 , ) -> Optional[int]: if trained_betas is not None: _A : List[str] = torch.tensor(__lowerCamelCase , dtype=torch.floataa) elif beta_schedule == "linear": _A : int = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _A : int = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _A : Dict = betas_for_alpha_bar(__lowerCamelCase) else: raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}") _A : List[Any] = 1.0 - self.betas _A : Tuple = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=None) -> List[Any]: if schedule_timesteps is None: _A : List[str] = self.timesteps _A : str = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: _A : Any = 1 if len(__lowerCamelCase) > 1 else 0 else: _A : List[Any] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase) else timestep _A : List[Any] = self._index_counter[timestep_int] return indices[pos].item() @property def _lowerCamelCase ( self) -> Union[str, Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , ) -> torch.FloatTensor: _A : Optional[Any] = self.index_for_timestep(__lowerCamelCase) if self.state_in_first_order: _A : Tuple = self.sigmas[step_index] else: _A : Union[str, Any] = self.sigmas_interpol[step_index] _A : Dict = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , ) -> Any: _A : List[Any] = num_inference_steps _A : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _A : str = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase)[::-1].copy() elif self.config.timestep_spacing == "leading": _A : Optional[int] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : List[Any] = (np.arange(0 , __lowerCamelCase) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _A : int = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : str = (np.arange(__lowerCamelCase , 0 , -step_ratio)).round().copy().astype(__lowerCamelCase) timesteps -= 1 else: raise ValueError( F"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") _A : Optional[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) _A : str = torch.from_numpy(np.log(__lowerCamelCase)).to(__lowerCamelCase) _A : str = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase)) , __lowerCamelCase) _A : Optional[Any] = np.concatenate([sigmas, [0.0]]).astype(np.floataa) _A : Dict = torch.from_numpy(__lowerCamelCase).to(device=__lowerCamelCase) # interpolate sigmas _A : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() _A : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) _A : Union[str, Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(__lowerCamelCase).startswith("mps"): # mps does not support float64 _A : str = torch.from_numpy(__lowerCamelCase).to(__lowerCamelCase , dtype=torch.floataa) else: _A : int = torch.from_numpy(__lowerCamelCase).to(__lowerCamelCase) # interpolate timesteps _A : Dict = self.sigma_to_t(__lowerCamelCase).to(__lowerCamelCase , dtype=timesteps.dtype) _A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() _A : int = torch.cat([timesteps[:1], interleaved_timesteps]) _A : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _A : str = defaultdict(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: # get log sigma _A : Any = sigma.log() # get distribution _A : List[str] = log_sigma - self.log_sigmas[:, None] # get sigmas range _A : Union[str, Any] = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) _A : Optional[Any] = low_idx + 1 _A : List[str] = self.log_sigmas[low_idx] _A : Any = self.log_sigmas[high_idx] # interpolate sigmas _A : Optional[Any] = (low - log_sigma) / (low - high) _A : Dict = w.clamp(0 , 1) # transform interpolation to time range _A : List[Any] = (1 - w) * low_idx + w * high_idx _A : Any = t.view(sigma.shape) return t @property def _lowerCamelCase ( self) -> Any: return self.sample is None def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]: _A : Optional[int] = self.index_for_timestep(__lowerCamelCase) # advance index counter by 1 _A : Optional[int] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _A : Any = self.sigmas[step_index] _A : Any = self.sigmas_interpol[step_index + 1] _A : str = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _A : Any = self.sigmas[step_index - 1] _A : List[str] = self.sigmas_interpol[step_index] _A : int = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _A : Dict = 0 _A : Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _A : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol _A : Union[str, Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _A : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol _A : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("prediction_type not implemented yet: sample") else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`") if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _A : Tuple = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _A : Union[str, Any] = sigma_interpol - sigma_hat # store for 2nd order step _A : Union[str, Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _A : int = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _A : Union[str, Any] = sigma_next - sigma_hat _A : Optional[Any] = self.sample _A : Any = None _A : Optional[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples _A : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase): # mps does not support float64 _A : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa) _A : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa) else: _A : Dict = self.timesteps.to(original_samples.device) _A : int = timesteps.to(original_samples.device) _A : Dict = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase) for t in timesteps] _A : Dict = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): _A : Dict = sigma.unsqueeze(-1) _A : int = original_samples + noise * sigma return noisy_samples def __len__( self) -> List[Any]: return self.config.num_train_timesteps
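# Minimal usage sketch: the scheduler above corresponds to KDPM2DiscreteScheduler in
# upstream diffusers; that class name is assumed here purely for illustration.
if __name__ == "__main__":
    from diffusers import KDPM2DiscreteScheduler

    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps=10, device="cpu")
    print(scheduler.init_noise_sigma, scheduler.timesteps[:5])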
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ): _A : Union[str, Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _A : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _A : Dict = format_type def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ): _A : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _A : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ): _A : List[str] = get_format_type_from_alias(UpperCamelCase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCamelCase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
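# Minimal usage sketch: the registry above is what datasets.Dataset.set_format goes
# through when resolving a format name or alias such as "numpy"/"np".
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [1, 2, 3]})
    ds.set_format("numpy")  # resolved via the formatter registry defined above
    print(type(ds[0]["x"]))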
def _UpperCAmelCase (UpperCamelCase__ : str ):
    # shift lowercase ASCII letters up by 32 code points ("a" -> "A"); leave everything else untouched
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in UpperCamelCase__)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
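# Quick sanity check of the ASCII-offset trick used above: only lowercase ASCII
# letters are shifted, everything else is kept as-is (plain ASCII input assumed).
if __name__ == "__main__":
    assert "".join(
        chr(ord(char) - 32) if "a" <= char <= "z" else char for char in "hello World_123"
    ) == "HELLO WORLD_123"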
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
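# Self-contained sanity check of the extended trapezoidal rule shown above, written
# independently of the code it follows: for f(x) = x**2 on [0, 1] the exact integral
# is 1/3, and 1000 panels should agree to well under 1e-5.
if __name__ == "__main__":
    def _trapezoid(func, a, b, steps):
        h = (b - a) / steps
        total = 0.5 * (func(a) + func(b))
        for i in range(1, int(steps)):
            total += func(a + i * h)
        return total * h

    approx = _trapezoid(lambda x: x * x, 0.0, 1.0, 1_000)
    assert abs(approx - 1 / 3) < 1e-5
    print(f"approx = {approx}")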
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowerCAmelCase__ = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) lowerCAmelCase__ = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ): _A : Dict = SavedModel() _A : int = [] with open(os.path.join(UpperCamelCase__ , "utils" , "tf_ops" , "onnx.json" ) ) as f: _A : Optional[int] = json.load(UpperCamelCase__ )["opsets"] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(UpperCamelCase__ )] ) with open(UpperCamelCase__ , "rb" ) as f: saved_model.ParseFromString(f.read() ) _A : List[Any] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want _A : Any = sorted(UpperCamelCase__ ) _A : Any = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(UpperCamelCase__ ) if strict and len(UpperCamelCase__ ) > 0: raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops ) elif len(UpperCamelCase__ ) > 0: print(f"Found the following incompatible ops for the opset {opset}:" ) print(*UpperCamelCase__ , sep="\n" ) else: print(f"The saved model {saved_model_path} can properly be converted with ONNX." ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) lowerCAmelCase__ = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
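# Usage note (kept as a comment so this file stays importable): the script above is
# meant to be run from the repository root against a TensorFlow SavedModel, e.g.
#   python utils/check_saved_model_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict
# The script filename and SavedModel location here are assumptions for illustration;
# the --saved_model_path, --opset and --strict flags come from the argparse setup above.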
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
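# Minimal usage sketch mirroring the save/load round-trip exercised by the tests above.
if __name__ == "__main__":
    import tempfile

    from transformers import GenerationConfig

    config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = GenerationConfig.from_pretrained(tmp_dir)
    assert reloaded.do_sample is True and reloaded.temperature == 0.7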
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
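# A minimal, readably-named NumPy sketch of the average-pooling step used by the
# pooling routine above: slide a non-overlapping window of side `size_pooling`
# over each square feature map and keep the window average. All names here are
# illustrative, not the class's own.
import numpy as np


def average_pool(feature_map: np.ndarray, size_pooling: int) -> np.ndarray:
    # Output side length when the window tiles the map without overlap.
    size_pooled = feature_map.shape[0] // size_pooling
    pooled = np.zeros((size_pooled, size_pooled))
    for i in range(size_pooled):
        for j in range(size_pooled):
            window = feature_map[
                i * size_pooling : (i + 1) * size_pooling,
                j * size_pooling : (j + 1) * size_pooling,
            ]
            pooled[i, j] = np.average(window)
    return pooled


# A 4x4 map pooled with a 2x2 window yields [[2.5, 4.5], [10.5, 12.5]].
print(average_pool(np.arange(16.0).reshape(4, 4), 2))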
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowerCAmelCase__ = object() # For specifying empty leaf dict `{}` lowerCAmelCase__ = object() def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ): _A : str = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def _UpperCAmelCase (UpperCamelCase__ : str ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def _UpperCAmelCase (): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )), (("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _UpperCAmelCase (UpperCamelCase__ : List[str] ): _A : int = _get_partition_rules() _A : Optional[int] = _replacement_rules(UpperCamelCase__ ) _A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def _UpperCAmelCase (UpperCamelCase__ : Optional[Any]=None ):
    _A : Tuple = argparse.ArgumentParser(add_help=UpperCamelCase__ , allow_abbrev=UpperCamelCase__ )
    # The main config parser
    _A : Union[str, Any] = config_command_parser(UpperCamelCase__ )
    # The subparser to add commands to
    _A : List[str] = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
    # Then add other parsers with the parent parser
    default_command_parser(UpperCamelCase__ , parents=[parent_parser] )
    update_command_parser(UpperCamelCase__ , parents=[parent_parser] )
    return config_parser


def _UpperCAmelCase ():
    _A : Optional[int] = get_config_parser()
    _A : Union[str, Any] = config_parser.parse_args()
    if not hasattr(UpperCamelCase__ , "func" ):
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(UpperCamelCase__ )


if __name__ == "__main__":
    main()
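# A minimal, self-contained sketch of the argparse sub-command pattern the config
# CLI above relies on: one parent parser, a sub-parser per command, and a `func`
# default that dispatches to the chosen handler. Command and option names here
# are illustrative, not accelerate's actual ones.
import argparse


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="tool")
    subparsers = parser.add_subparsers(title="subcommands", dest="subcommand")

    # Each sub-command registers its own options and a handler via set_defaults.
    default_cmd = subparsers.add_parser("default", help="write a default config")
    default_cmd.add_argument("--path", default="config.yaml")
    default_cmd.set_defaults(func=lambda args: print(f"writing {args.path}"))
    return parser


if __name__ == "__main__":
    args = build_parser().parse_args()
    if not hasattr(args, "func"):
        build_parser().print_help()
    else:
        args.func(args)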
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : bool = False ):
    if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
        _A : Optional[Any] = f"Expected string as input, found {type(UpperCamelCase__ )}"
        raise ValueError(UpperCamelCase__ )
    if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
        _A : Union[str, Any] = f"Expected boolean as use_pascal parameter, found {type(UpperCamelCase__ )}"
        raise ValueError(UpperCamelCase__ )
    _A : int = input_str.split("_" )
    _A : str = 0 if use_pascal else 1
    _A : str = words[start_index:]
    _A : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
    _A : Any = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
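# Because the identifiers in the snippet above are corpus-obfuscated placeholders,
# here is a readably-named sketch of the same snake_case to camelCase/PascalCase
# conversion; the names are illustrative, not the source's.
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized_words = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


# "some_random_name" -> "someRandomName"; with use_pascal=True -> "SomeRandomName"
print(snake_to_camel_case("some_random_name"))
print(snake_to_camel_case("some_random_name", use_pascal=True))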
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
11
from __future__ import annotations


def _UpperCAmelCase (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
    _A : Dict = list(range(len(UpperCamelCase__ ) ) )
    _A : Any = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
    index.sort(key=lambda UpperCamelCase__ : ratio[i] , reverse=UpperCamelCase__ )
    _A : float = 0
    _A : list[float] = [0] * len(UpperCamelCase__ )
    for i in index:
        if weight[i] <= capacity:
            _A : Union[str, Any] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            _A : Optional[Any] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
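# The function above is the greedy fractional-knapsack algorithm: sort items by
# value-to-weight ratio, take whole items while they fit, then a fraction of the
# next one. A readably-named sketch of the same logic; names are illustrative,
# not the source's.
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


# Values [60, 100, 120], weights [10, 20, 30], capacity 50 -> (240.0, [1, 1, 2/3]).
print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))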
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowerCAmelCase__ = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase__ = get_tests_dir('fixtures/vocab.json') lowerCAmelCase__ = get_tests_dir('fixtures') class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] def _lowerCamelCase ( self) -> Any: _A : str = 0 def _lowerCamelCase ( self) -> Optional[Any]: _A : Dict = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> int: with tempfile.TemporaryDirectory() as tmpdirname: _A : Union[str, Any] = WavaVecaConfig() _A : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") # save in new folder model_config.save_pretrained(__lowerCamelCase) processor.save_pretrained(__lowerCamelCase) _A : Dict = AutoProcessor.from_pretrained(__lowerCamelCase) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase)) copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , "vocab.json")) _A : str = AutoProcessor.from_pretrained(__lowerCamelCase) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> int: with tempfile.TemporaryDirectory() as tmpdirname: _A : Dict = WavaVecaFeatureExtractor() _A : List[Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") _A : Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase) # save in new folder processor.save_pretrained(__lowerCamelCase) # drop `processor_class` in tokenizer with open(os.path.join(__lowerCamelCase , __lowerCamelCase) , "r") as f: _A : Dict = json.load(__lowerCamelCase) config_dict.pop("processor_class") with open(os.path.join(__lowerCamelCase , __lowerCamelCase) , "w") as f: f.write(json.dumps(__lowerCamelCase)) _A : int = AutoProcessor.from_pretrained(__lowerCamelCase) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: _A : Tuple = WavaVecaFeatureExtractor() _A : List[str] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") _A : Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase) # save in 
new folder processor.save_pretrained(__lowerCamelCase) # drop `processor_class` in feature extractor with open(os.path.join(__lowerCamelCase , __lowerCamelCase) , "r") as f: _A : Optional[Any] = json.load(__lowerCamelCase) config_dict.pop("processor_class") with open(os.path.join(__lowerCamelCase , __lowerCamelCase) , "w") as f: f.write(json.dumps(__lowerCamelCase)) _A : str = AutoProcessor.from_pretrained(__lowerCamelCase) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: with tempfile.TemporaryDirectory() as tmpdirname: _A : Tuple = WavaVecaConfig(processor_class="Wav2Vec2Processor") model_config.save_pretrained(__lowerCamelCase) # copy relevant files copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , "vocab.json")) # create emtpy sample processor with open(os.path.join(__lowerCamelCase , __lowerCamelCase) , "w") as f: f.write("{}") _A : Optional[int] = AutoProcessor.from_pretrained(__lowerCamelCase) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowerCamelCase): _A : Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor") # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCamelCase): _A : Optional[Any] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowerCamelCase) _A : str = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowerCamelCase) self.assertTrue(processor.special_attribute_present) self.assertEqual(processor.__class__.__name__ , "NewProcessor") _A : Any = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor") _A : Dict = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast") # Test we can also load the slow version _A : str = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase) _A : List[Any] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present) self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer") else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer") def _lowerCamelCase ( self) -> Dict: try: AutoConfig.register("custom" , __lowerCamelCase) AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase) AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase) AutoProcessor.register(__lowerCamelCase , __lowerCamelCase) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase): AutoProcessor.register(__lowerCamelCase , __lowerCamelCase) # Now that the config is registered, it can be used as any other config with the auto-API _A : Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[int] = os.path.join(__lowerCamelCase , "vocab.txt") with open(__lowerCamelCase , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) _A : List[str] = CustomTokenizer(__lowerCamelCase) _A : Tuple = 
CustomProcessor(__lowerCamelCase , __lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(__lowerCamelCase) _A : Tuple = AutoProcessor.from_pretrained(__lowerCamelCase) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowerCamelCase ( self) -> Any: class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = False class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = False class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "AutoFeatureExtractor" __SCREAMING_SNAKE_CASE = "AutoTokenizer" __SCREAMING_SNAKE_CASE = False try: AutoConfig.register("custom" , __lowerCamelCase) AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase) AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase) AutoProcessor.register(__lowerCamelCase , __lowerCamelCase) # If remote code is not set, the default is to use local classes. _A : Dict = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor") self.assertEqual(processor.__class__.__name__ , "NewProcessor") self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) # If remote code is disabled, we load the local ones. _A : Any = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowerCamelCase) self.assertEqual(processor.__class__.__name__ , "NewProcessor") self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) # If remote is enabled, we load from the Hub. 
_A : Union[str, Any] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=__lowerCamelCase) self.assertEqual(processor.__class__.__name__ , "NewProcessor") self.assertTrue(processor.special_attribute_present) self.assertTrue(processor.feature_extractor.special_attribute_present) self.assertTrue(processor.tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowerCamelCase ( self) -> Dict: _A : Tuple = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast") def _lowerCamelCase ( self) -> List[Any]: _A : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext") self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor") @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def _lowerCamelCase ( cls) -> Any: _A : Any = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[str]: try: delete_repo(token=cls._token , repo_id="test-processor") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-processor-org") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-processor") except HTTPError: pass def _lowerCamelCase ( self) -> Tuple: _A : Optional[Any] = WavaVecaProcessor.from_pretrained(__lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCamelCase , "test-processor") , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : str = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor") for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase)) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab()) def _lowerCamelCase ( self) -> str: _A : Optional[Any] = WavaVecaProcessor.from_pretrained(__lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCamelCase , "test-processor-org") , push_to_hub=__lowerCamelCase , use_auth_token=self._token , organization="valid_org" , ) _A : List[str] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org") for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase)) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab()) def _lowerCamelCase ( self) -> int: CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() _A : Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: _A : str = os.path.join(__lowerCamelCase , "vocab.txt") with open(__lowerCamelCase , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in 
self.vocab_tokens])) _A : List[Any] = CustomTokenizer(__lowerCamelCase) _A : int = CustomProcessor(__lowerCamelCase , __lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F"{USER}/test-dynamic-processor" , token=self._token) _A : Optional[Any] = Repository(__lowerCamelCase , clone_from=F"{USER}/test-dynamic-processor" , token=self._token) processor.save_pretrained(__lowerCamelCase) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor", "AutoProcessor": "custom_processing.CustomProcessor", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(__lowerCamelCase , "tokenizer_config.json")) as f: _A : Any = json.load(__lowerCamelCase) self.assertDictEqual( tokenizer_config["auto_map"] , { "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None], "AutoProcessor": "custom_processing.CustomProcessor", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , "custom_feature_extraction.py"))) self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , "custom_tokenization.py"))) self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , "custom_processing.py"))) repo.push_to_hub() _A : List[Any] = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=__lowerCamelCase) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor")
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


lowerCAmelCase__ = logging.get_logger(__name__)


class lowerCAmelCase__ ( a):
    '''simple docstring'''

    def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , __lowerCamelCase , )
        super().__init__(*__lowerCamelCase , **__lowerCamelCase)
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowerCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ): _A : Dict = set() _A : Optional[int] = [] def parse_line(UpperCamelCase__ : Tuple ): for line in fp: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _A : str = line.decode("UTF-8" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" " ): # process a single warning and move it to `selected_warnings`. if len(UpperCamelCase__ ) > 0: _A : List[str] = "\n".join(UpperCamelCase__ ) # Only keep the warnings specified in `targets` if any(f": {x}: " in warning for x in targets ): selected_warnings.add(UpperCamelCase__ ) buffer.clear() continue else: _A : Optional[int] = line.strip() buffer.append(UpperCamelCase__ ) if from_gh: for filename in os.listdir(UpperCamelCase__ ): _A : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) if not os.path.isdir(UpperCamelCase__ ): # read the file if filename != "warnings.txt": continue with open(UpperCamelCase__ ) as fp: parse_line(UpperCamelCase__ ) else: try: with zipfile.ZipFile(UpperCamelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCamelCase__ ): # read the file if filename != "warnings.txt": continue with z.open(UpperCamelCase__ ) as fp: parse_line(UpperCamelCase__ ) except Exception: logger.warning( f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def _UpperCAmelCase (UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ): _A : int = set() _A : int = [os.path.join(UpperCamelCase__ , UpperCamelCase__ ) for p in os.listdir(UpperCamelCase__ ) if (p.endswith(".zip" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(UpperCamelCase__ , UpperCamelCase__ ) ) return selected_warnings if __name__ == "__main__": def _UpperCAmelCase (UpperCamelCase__ : int ): return values.split("," ) lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') # optional parameters parser.add_argument( '--targets', default='DeprecationWarning,UserWarning,FutureWarning', type=list_str, help='Comma-separated list of target warning(s) which we want to extract.', ) parser.add_argument( '--from_gh', action='store_true', help='If running from a GitHub action workflow and collecting warnings from its artifacts.', ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowerCAmelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('=' * 80) download_artifact(name, url, 
args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowerCAmelCase__ = extract_warnings(args.output_dir, args.targets) lowerCAmelCase__ = sorted(selected_warnings) with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]: _A : int = parent _A : Optional[Any] = batch_size _A : str = image_size _A : Tuple = patch_size _A : Tuple = num_channels _A : Optional[int] = embed_dim _A : Dict = depths _A : Any = num_heads _A : Any = window_size _A : int = mlp_ratio _A : Any = qkv_bias _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Dict = drop_path_rate _A : List[Any] = hidden_act _A : Any = use_absolute_embeddings _A : Optional[int] = patch_norm _A : Tuple = layer_norm_eps _A : List[str] = initializer_range _A : Optional[int] = is_training _A : Optional[Any] = scope _A : Optional[int] = use_labels _A : Dict = type_sequence_label_size _A : str = encoder_stride _A : Optional[int] = out_features _A : Optional[int] = out_indices def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _A : Optional[Any] = None if self.use_labels: _A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _A : Optional[int] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]: _A : Dict = MaskFormerSwinModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : int = model(__lowerCamelCase) _A : Any = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict: _A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : Dict = model(__lowerCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4]) # verify ValueError with self.parent.assertRaises(__lowerCamelCase): _A : Union[str, Any] = ["stem"] _A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : Any = self.prepare_config_and_inputs() _A , _A , _A : List[Any] = config_and_inputs _A : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def _lowerCamelCase ( self) -> str: _A : Union[str, Any] = MaskFormerSwinModelTester(self) _A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self) -> str: return def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase) @unittest.skip("Swin does not use inputs_embeds") def _lowerCamelCase ( self) -> str: pass @unittest.skip("Swin does not support feedforward chunking") def _lowerCamelCase ( self) -> List[Any]: pass def _lowerCamelCase ( self) -> Optional[int]: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Union[str, Any] = model_class(__lowerCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _A : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear)) def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : int = model_class(__lowerCamelCase) _A : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : int = [*signature.parameters.keys()] _A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def _lowerCamelCase ( self) -> Tuple: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Any = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() with torch.no_grad(): _A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)) _A : Tuple = outputs.hidden_states _A : Any = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase) # Swin has a different seq_length _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self) -> Dict: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Optional[int] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Optional[int] = 3 _A : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Union[str, Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def _lowerCamelCase ( self) -> List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> List[str]: 
pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCamelCase): _A : Optional[int] = 0 return t def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}): with torch.no_grad(): _A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase) _A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple() def recursive_check(__lowerCamelCase , __lowerCamelCase): if isinstance(__lowerCamelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase): recursive_check(__lowerCamelCase , __lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__lowerCamelCase , __lowerCamelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=( "Tuple and dict output are not equal. Difference:" F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has" F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}." ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase) for model_class in self.all_model_classes: _A : List[Any] = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) _A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) @require_torch class lowerCAmelCase__ ( unittest.TestCase , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = MaskFormerSwinModelTester(self) def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _A : Optional[Any] = backbone_class(__lowerCamelCase) backbone.to(__lowerCamelCase) backbone.eval() _A : List[Any] = 
backbone(**__lowerCamelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True _A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A : List[str] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: _A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase) self.assertIsNotNone(outputs.attentions)
def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
    _A : str = 0
    _A : Tuple = len(UpperCamelCase__ ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        _A : Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(UpperCamelCase__ ):
            return None
        _A : Union[str, Any] = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                _A : List[str] = left
                _A : Union[str, Any] = point
            elif point > right:
                _A : Any = right
                _A : Any = point
            else:
                if item < current_item:
                    _A : Union[str, Any] = point - 1
                else:
                    _A : List[str] = point + 1
    return None


def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    _A : List[Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(UpperCamelCase__ ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    elif point > right:
        return interpolation_search_by_recursion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , point - 1 )
        else:
            return interpolation_search_by_recursion(
                UpperCamelCase__ , UpperCamelCase__ , point + 1 , UpperCamelCase__ )


def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
    if collection != sorted(UpperCamelCase__ ):
        raise ValueError("Collection must be ascending sorted" )
    return True


if __name__ == "__main__":
    import sys

    lowerCAmelCase__ = 0
    if debug == 1:
        lowerCAmelCase__ = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    lowerCAmelCase__ = 67
    lowerCAmelCase__ = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print('Not found')
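# A readably-named, slightly simplified sketch of the iterative interpolation
# search above: probe where the target would fall by linear interpolation between
# the boundary values, then narrow the interval. This version clamps the probe
# index into [left, right] instead of the snippet's bound-swapping; names are
# illustrative, not the source's.
from __future__ import annotations


def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        # All remaining values are equal: avoid a zero denominator below.
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        # Estimate the position by interpolating between the boundary values.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        point = max(left, min(point, right))
        if sorted_collection[point] == item:
            return point
        if item < sorted_collection[point]:
            right = point - 1
        else:
            left = point + 1
    return None


# 66 sits at index 5 of this ascending list; 67 is absent and returns None.
print(interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66))
print(interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 67))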
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase__ : '''simple docstring''' @staticmethod def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase) -> Union[str, Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = MODEL_FOR_OBJECT_DETECTION_MAPPING def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[Any]: _A : Dict = ObjectDetectionPipeline(model=__lowerCamelCase , image_processor=__lowerCamelCase) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Tuple: _A : Optional[int] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0) self.assertGreater(len(__lowerCamelCase) , 0) for detected_object in outputs: self.assertEqual( __lowerCamelCase , { "score": ANY(__lowerCamelCase), "label": ANY(__lowerCamelCase), "box": {"xmin": ANY(__lowerCamelCase), "ymin": ANY(__lowerCamelCase), "xmax": ANY(__lowerCamelCase), "ymax": ANY(__lowerCamelCase)}, } , ) import datasets _A : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test") _A : str = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _A : str = object_detector(__lowerCamelCase , threshold=0.0) self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase)) for outputs in batch_outputs: self.assertGreater(len(__lowerCamelCase) , 0) for detected_object in outputs: self.assertEqual( __lowerCamelCase , { "score": ANY(__lowerCamelCase), "label": ANY(__lowerCamelCase), "box": {"xmin": ANY(__lowerCamelCase), "ymin": ANY(__lowerCamelCase), "xmax": ANY(__lowerCamelCase), "ymax": ANY(__lowerCamelCase)}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF") def _lowerCamelCase ( self) -> List[Any]: pass @require_torch def _lowerCamelCase ( self) -> List[Any]: _A : Optional[Any] = "hf-internal-testing/tiny-detr-mobilenetsv3" _A : Optional[int] = AutoModelForObjectDetection.from_pretrained(__lowerCamelCase) _A : int = AutoFeatureExtractor.from_pretrained(__lowerCamelCase) _A : Optional[Any] = ObjectDetectionPipeline(model=__lowerCamelCase , feature_extractor=__lowerCamelCase) _A : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ] , ) _A : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ [ {"score": 0.3_3_7_6, "label": 
"LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ], [ {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ], ] , ) @require_torch @slow def _lowerCamelCase ( self) -> Optional[Any]: _A : List[Any] = "facebook/detr-resnet-50" _A : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__lowerCamelCase) _A : Optional[int] = AutoFeatureExtractor.from_pretrained(__lowerCamelCase) _A : Union[str, Any] = ObjectDetectionPipeline(model=__lowerCamelCase , feature_extractor=__lowerCamelCase) _A : Optional[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) _A : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ]) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], ] , ) @require_torch @slow def _lowerCamelCase ( self) -> Optional[Any]: _A : int = "facebook/detr-resnet-50" _A : str = pipeline("object-detection" , model=__lowerCamelCase) _A : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": 
"cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) _A : List[Any] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ]) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], ] , ) @require_torch @slow def _lowerCamelCase ( self) -> int: _A : Any = 0.9_9_8_5 _A : List[str] = "facebook/detr-resnet-50" _A : Dict = pipeline("object-detection" , model=__lowerCamelCase) _A : List[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__lowerCamelCase) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) @require_torch @require_pytesseract @slow def _lowerCamelCase ( self) -> Tuple: _A : str = "Narsil/layoutlmv3-finetuned-funsd" _A : Optional[Any] = 0.9_9_9_3 _A : Any = pipeline("object-detection" , model=__lowerCamelCase , threshold=__lowerCamelCase) _A : List[str] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png") self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4) , [ {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}}, {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}}, ] , )
11
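# A minimal usage sketch of the object-detection pipeline exercised by the tests above.
# It uses the same "facebook/detr-resnet-50" checkpoint and COCO image URL as the slow tests;
# everything else is the standard transformers pipeline API (timm may be required for DETR,
# as the @require_timm decorator above suggests).
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9,
)
for pred in predictions:
    # each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(f"{pred['label']:>10} {pred['score']:.3f} {pred['box']}")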
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
11
1
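# The pipeline above matches diffusers' latent-diffusion super-resolution pipeline. A minimal
# usage sketch, assuming it is exposed as LDMSuperResolutionPipeline and that the
# "CompVis/ldm-super-resolution-4x-openimages" checkpoint is available; those two names are
# assumptions, the call arguments mirror __call__ above.
from io import BytesIO

import requests
import torch
from PIL import Image

from diffusers import LDMSuperResolutionPipeline

# any RGB image works as the low-resolution input; preprocess() resizes it to a multiple of 32
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
low_res = Image.open(BytesIO(requests.get(url).content)).convert("RGB").resize((128, 128))

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# num_inference_steps and eta correspond to the __call__ arguments shown above
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")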
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.0_2 , __lowerCamelCase=4 , ) -> str: _A : Any = parent _A : Dict = batch_size _A : Dict = seq_length _A : Union[str, Any] = is_training _A : List[Any] = use_attention_mask _A : str = use_token_type_ids _A : Dict = use_labels _A : List[str] = vocab_size _A : Dict = hidden_size _A : List[Any] = num_hidden_layers _A : int = num_attention_heads _A : List[Any] = intermediate_size _A : int = hidden_act _A : List[str] = hidden_dropout_prob _A : List[str] = attention_probs_dropout_prob _A : Optional[int] = max_position_embeddings _A : List[str] = type_vocab_size _A : Dict = type_sequence_label_size _A : List[str] = initializer_range _A : Dict = num_choices def _lowerCamelCase ( self) -> str: _A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _A : Dict = None if self.use_attention_mask: _A : Dict = random_attention_mask([self.batch_size, self.seq_length]) _A : str = None if self.use_token_type_ids: _A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _A : Any = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self) -> List[Any]: _A : Optional[Any] = self.prepare_config_and_inputs() _A , _A , _A , _A : Any = config_and_inputs _A : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _lowerCamelCase ( self) -> str: _A : Dict = self.prepare_config_and_inputs() _A , _A , _A , _A : Optional[Any] = config_and_inputs _A : Any = True _A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # 
Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class lowerCAmelCase__ ( a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self) -> Tuple: _A : Union[str, Any] = FlaxRobertaPreLayerNormModelTester(self) @slow def _lowerCamelCase ( self) -> Optional[int]: for model_class_name in self.all_model_classes: _A : List[str] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowerCamelCase) _A : int = model(np.ones((1, 1))) self.assertIsNotNone(__lowerCamelCase) @require_flax class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @slow def _lowerCamelCase ( self) -> Optional[int]: _A : str = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowerCamelCase) _A : List[Any] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) _A : Optional[int] = model(__lowerCamelCase)[0] _A : Union[str, Any] = [1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape) , __lowerCamelCase) # compare the actual values for a slice. _A : Dict = np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4)) @slow def _lowerCamelCase ( self) -> int: _A : Optional[Any] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowerCamelCase) _A : str = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) _A : Dict = model(__lowerCamelCase)[0] # compare the actual values for a slice. _A : List[str] = np.array( [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4))
11
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
11
1
import math import unittest def _UpperCAmelCase (UpperCamelCase__ : int ): assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> Optional[Any]: self.assertTrue(is_prime(2)) self.assertTrue(is_prime(3)) self.assertTrue(is_prime(5)) self.assertTrue(is_prime(7)) self.assertTrue(is_prime(1_1)) self.assertTrue(is_prime(1_3)) self.assertTrue(is_prime(1_7)) self.assertTrue(is_prime(1_9)) self.assertTrue(is_prime(2_3)) self.assertTrue(is_prime(2_9)) def _lowerCamelCase ( self) -> List[Any]: with self.assertRaises(__lowerCamelCase): is_prime(-1_9) self.assertFalse( is_prime(0) , "Zero doesn't have any positive factors, primes must have exactly two." , ) self.assertFalse( is_prime(1) , "One only has 1 positive factor, primes must have exactly two." , ) self.assertFalse(is_prime(2 * 2)) self.assertFalse(is_prime(2 * 3)) self.assertFalse(is_prime(3 * 3)) self.assertFalse(is_prime(3 * 5)) self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": unittest.main()
11
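# A quick, self-contained cross-check of the 6k +/- 1 primality test above against plain trial
# division; purely illustrative, the fast test is repeated here (without the input assertion)
# so the sketch runs on its own.
import math


def is_prime(number: int) -> bool:
    # same 6k +/- 1 test as above
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def brute_force_is_prime(number: int) -> bool:
    # reference implementation: trial division over every candidate divisor
    return number >= 2 and all(number % d for d in range(2, number))


# every prime larger than 3 is of the form 6k +/- 1, so probing only those candidates
# up to sqrt(n) is sufficient
assert all(is_prime(n) == brute_force_is_prime(n) for n in range(1_000))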
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
11
1
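# A short usage sketch of the fast MBart tokenizer defined above, assuming it is exported as
# MBartTokenizerFast and that a recent transformers version (with the text_target argument) is
# installed; the src_lang/tgt_lang values come from FAIRSEQ_LANGUAGE_CODES above.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

# source text is suffixed with </s> and the source language code (set_src_lang_special_tokens)
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")

# target text is tokenized with the target-language special tokens instead
labels = tokenizer(
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria", return_tensors="pt"
)
batch["labels"] = labels["input_ids"]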
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "gptj" __SCREAMING_SNAKE_CASE = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __lowerCamelCase=5_0_4_0_0 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=4_0_9_6 , __lowerCamelCase=2_8 , __lowerCamelCase=1_6 , __lowerCamelCase=6_4 , __lowerCamelCase=None , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0_2 , __lowerCamelCase=True , __lowerCamelCase=5_0_2_5_6 , __lowerCamelCase=5_0_2_5_6 , __lowerCamelCase=False , **__lowerCamelCase , ) -> Any: _A : Dict = vocab_size _A : Any = n_positions _A : List[str] = n_embd _A : Optional[int] = n_layer _A : str = n_head _A : Union[str, Any] = n_inner _A : List[Any] = rotary_dim _A : int = activation_function _A : Dict = resid_pdrop _A : int = embd_pdrop _A : int = attn_pdrop _A : Tuple = layer_norm_epsilon _A : List[Any] = initializer_range _A : Dict = use_cache _A : Any = bos_token_id _A : Optional[int] = eos_token_id super().__init__( bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase) class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase = "default" , __lowerCamelCase = None , __lowerCamelCase = False , ) -> Dict: super().__init__(__lowerCamelCase , task=__lowerCamelCase , patching_specs=__lowerCamelCase , use_past=__lowerCamelCase) if not getattr(self._config , "pad_token_id" , __lowerCamelCase): # TODO: how to do that better? 
_A : Dict = 0 @property def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]: _A : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs") _A : List[Any] = {0: "batch", 1: "past_sequence + sequence"} else: _A : Dict = {0: "batch", 1: "sequence"} return common_inputs @property def _lowerCamelCase ( self) -> int: return self._config.n_layer @property def _lowerCamelCase ( self) -> int: return self._config.n_head def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]: _A : Tuple = super(__lowerCamelCase , self).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase) # We need to order the input in the way they appears in the forward() _A : Union[str, Any] = OrderedDict({"input_ids": common_inputs["input_ids"]}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch _A , _A : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values _A : Optional[int] = seqlen + 2 _A : Optional[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _A : List[str] = [ (torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)) for _ in range(self.num_layers) ] _A : List[Any] = common_inputs["attention_mask"] if self.use_past: _A : str = ordered_inputs["attention_mask"].dtype _A : Union[str, Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase)] , dim=1) return ordered_inputs @property def _lowerCamelCase ( self) -> int: return 1_3
11
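# A small sketch of how the attribute_map defined above aliases the GPT-2 style argument names
# (n_embd, n_layer, ...) onto the generic config attributes; it assumes the config class above is
# exported as GPTJConfig.
from transformers import GPTJConfig

config = GPTJConfig(n_embd=1024, n_layer=12, n_head=8, n_positions=1024)

# reading either name goes through the same underlying value
assert config.hidden_size == config.n_embd == 1024
assert config.num_hidden_layers == config.n_layer == 12
assert config.num_attention_heads == config.n_head == 8
assert config.max_position_embeddings == config.n_positions == 1024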
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase__ = '</w>' lowerCAmelCase__ = '@@ ' def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : Optional[int] = set() _A : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A : List[Any] = char return pairs # Speech2Text2 has no max input length lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) _A : Dict = do_lower_case with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[int] = json.load(__lowerCamelCase) _A : Optional[Any] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") _A : Optional[Any] = None _A : Tuple = None else: with open(__lowerCamelCase , encoding="utf-8") as merges_handle: _A : Optional[int] = merges_handle.read().split("\n")[:-1] _A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges] _A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase)))) _A : List[Any] = {} @property def _lowerCamelCase ( self) -> int: return len(self.decoder) def _lowerCamelCase ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A : int = get_pairs(__lowerCamelCase) if not pairs: return token while True: _A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf"))) if bigram not in self.bpe_ranks: break _A , _A : Optional[int] = bigram _A : int = [] _A : str = 0 while i < len(__lowerCamelCase): try: _A : str = word.index(__lowerCamelCase , __lowerCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _A : str = j if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _A : List[str] = tuple(__lowerCamelCase) _A : List[str] = new_word if len(__lowerCamelCase) == 1: break else: _A : List[Any] = get_pairs(__lowerCamelCase) _A : Tuple = " ".join(__lowerCamelCase) if word == "\n " + BPE_TOKEN_MERGES: _A : List[str] = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase): _A : int = word.replace(__lowerCamelCase , "") _A : int = word.replace(" " , __lowerCamelCase) _A : Union[str, Any] = word return word def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." 
"Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: _A : List[Any] = text.lower() _A : Optional[int] = text.split() _A : List[str] = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" "))) return split_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token) return result def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : str = " ".join(__lowerCamelCase) # make sure @@ tokens are concatenated _A : int = "".join(string.split(__lowerCamelCase)) return string def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") _A : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!") _A : Optional[int] = token_index writer.write(" ".join(__lowerCamelCase) + "\n") index += 1 return (vocab_file, merges_file)
11
1
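# A self-contained illustration of the adjacent-symbol-pair extraction that drives the BPE merge
# loop above (the module-level helper referred to as get_pairs); reimplemented here so it runs on
# its own.
def get_pairs(word):
    # set of adjacent symbol pairs in a word represented as a tuple of symbols
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# "lower" as an initial symbol sequence: one symbol per character, end-of-word marker on the last
word = ("l", "o", "w", "e", "r</w>")
print(get_pairs(word))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}
# the merge loop repeatedly picks the lowest-ranked pair from bpe_ranks and fuses its two symbols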
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever lowerCAmelCase__ = logging.getLogger(__name__) class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None) -> int: super().__init__( __lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , ) _A : int = None def _lowerCamelCase ( self , __lowerCamelCase) -> List[Any]: logger.info("initializing retrieval") # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized") # needs to be set manually _A : Any = self._infer_socket_ifname() # avoid clash with the NCCL port _A : int = str(distributed_port + 1) _A : Tuple = dist.new_group(ranks=__lowerCamelCase , backend="gloo") # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main") self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group) def _lowerCamelCase ( self) -> Optional[Any]: return dist.get_rank(group=self.process_group) == 0 def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa) -> Union[str, Any]: _A : Tuple = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase) dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group) return target_tensor def _lowerCamelCase ( self) -> List[str]: _A : Any = psutil.net_if_addrs() # a hacky way to deal with varying network interface names _A : Optional[Any] = next((addr for addr in addrs if addr.startswith("e")) , __lowerCamelCase) return ifname def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Tuple[np.ndarray, List[dict]]: # single GPU training if not dist.is_initialized(): _A , _A : int = self._main_retrieve(__lowerCamelCase , __lowerCamelCase) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase) # distributed training _A : Tuple = dist.get_world_size(group=self.process_group) # gather logic _A : Optional[Any] = None if self._is_main(): _A : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(__lowerCamelCase)] dist.gather(torch.tensor(__lowerCamelCase) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group) # scatter logic _A : List[str] = question_hidden_states.shape[0] _A : str = [] _A : int = [] if self._is_main(): assert len(__lowerCamelCase) == world_size _A , _A : Tuple = self._main_retrieve(torch.cat(__lowerCamelCase).numpy() , __lowerCamelCase) _A , _A : Tuple = torch.tensor(__lowerCamelCase), torch.tensor(__lowerCamelCase) _A : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase) _A : Any = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase) _A : int = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa) _A : List[str] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]]) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase)
11
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "vit_mae" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int: super().__init__(**__lowerCamelCase) _A : int = hidden_size _A : List[str] = num_hidden_layers _A : List[Any] = num_attention_heads _A : Optional[Any] = intermediate_size _A : Optional[int] = hidden_act _A : List[Any] = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : Union[str, Any] = initializer_range _A : str = layer_norm_eps _A : Any = image_size _A : int = patch_size _A : int = num_channels _A : Dict = qkv_bias _A : Tuple = decoder_num_attention_heads _A : Tuple = decoder_hidden_size _A : List[str] = decoder_num_hidden_layers _A : Optional[Any] = decoder_intermediate_size _A : List[str] = mask_ratio _A : Union[str, Any] = norm_pix_loss
11
1
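# A minimal sketch instantiating the configuration above, assuming it is exported as ViTMAEConfig;
# the 0.75 mask ratio is the default used for MAE pre-training.
from transformers import ViTMAEConfig, ViTMAEModel

# ViT-Base encoder with the default 75% patch masking
config = ViTMAEConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mask_ratio=0.75)
model = ViTMAEModel(config)

print(model.config.mask_ratio)           # 0.75
print(model.config.decoder_hidden_size)  # 512 -- lightweight decoder used only during pre-training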
from collections.abc import Sequence from queue import Queue class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None) -> Union[str, Any]: _A : str = start _A : Optional[int] = end _A : List[str] = val _A : Tuple = (start + end) // 2 _A : Any = left _A : List[Any] = right def __repr__( self) -> Any: return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})" class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase) -> List[str]: _A : str = collection _A : Optional[Any] = function if self.collection: _A : Optional[int] = self._build_tree(0 , len(__lowerCamelCase) - 1) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Tuple: self._update_tree(self.root , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[str]: return self._query_range(self.root , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[str]: if start == end: return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.collection[start]) _A : List[Any] = (start + end) // 2 _A : int = self._build_tree(__lowerCamelCase , __lowerCamelCase) _A : Optional[Any] = self._build_tree(mid + 1 , __lowerCamelCase) return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.fn(left.val , right.val) , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> int: if node.start == i and node.end == i: _A : List[str] = val return if i <= node.mid: self._update_tree(node.left , __lowerCamelCase , __lowerCamelCase) else: self._update_tree(node.right , __lowerCamelCase , __lowerCamelCase) _A : str = self.fn(node.left.val , node.right.val) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> str: if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , __lowerCamelCase , __lowerCamelCase) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , __lowerCamelCase , node.mid) , self._query_range(node.right , node.mid + 1 , __lowerCamelCase) , ) else: # range in right child tree return self._query_range(node.right , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Dict: if self.root is not None: _A : Optional[int] = Queue() queue.put(self.root) while not queue.empty(): _A : Tuple = queue.get() yield node if node.left is not None: queue.put(node.left) if node.right is not None: queue.put(node.right) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
11
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True , __lowerCamelCase = False) -> Dict: _A : Dict = scheduler _A : Union[str, Any] = optimizers if isinstance(__lowerCamelCase , (list, tuple)) else [optimizers] _A : int = split_batches _A : int = step_with_optimizer _A : Any = GradientState() def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step _A : List[Any] = AcceleratorState().num_processes for _ in range(__lowerCamelCase): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps"): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase) else: self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: return self.scheduler.get_last_lr() def _lowerCamelCase ( self) -> Optional[Any]: return self.scheduler.state_dict() def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: self.scheduler.load_state_dict(__lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: return self.scheduler.get_lr() def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.scheduler.print_lr(*__lowerCamelCase , **__lowerCamelCase)
11
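# In practice this wrapper is created by Accelerator.prepare rather than instantiated directly.
# A minimal sketch of the intended usage with a toy model and dataset (standard accelerate API);
# the wrapped scheduler.step() below is skipped when the optimizer step was skipped and repeated
# num_processes times when split_batches is False, as implemented above.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
dataloader = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 2)), batch_size=8)

# prepare() returns the objects in the same order, with the scheduler wrapped
model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)

for inputs, targets in dataloader:
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()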
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
11
1
import os


def solution():
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    # num.txt is expected to sit next to this script, one (potentially very large) integer per line.
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
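A quick way to sanity-check solution() above is to point it at a throwaway num.txt with a hand-computable sum. A minimal sketch, assuming it is appended to the module above so os and solution() are in scope; the two values are illustrative, not the real one hundred 50-digit numbers:

def _sanity_check() -> None:
    # Write a throwaway num.txt next to this module (this would overwrite an existing file).
    sample_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(sample_path, "w") as fh:
        fh.write("123\n877\n")  # 123 + 877 = 1000
    # The sum has fewer than ten digits, so the [:10] slice returns it unchanged.
    assert solution() == "1000"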
11
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self) -> int: if self.train_file is not None: _A : Optional[int] = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: _A : Dict = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , __lowerCamelCase) -> str: _A : List[Any] = "label" if "label" in features[0].keys() else "labels" _A : Any = [feature.pop(__lowerCamelCase) for feature in features] _A : Optional[int] = len(__lowerCamelCase) _A : int = len(features[0]["input_ids"]) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features ] _A : str = list(chain(*__lowerCamelCase)) _A : Tuple = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()} # Add back labels _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa) return batch def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : int = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _A : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : List[str] = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Tuple = data_args.validation_file _A : Union[str, Any] = data_args.train_file.split("." )[-1] _A : List[str] = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. _A : Union[str, Any] = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : str = [f"ending{i}" for i in range(4 )] _A : Union[str, Any] = "sent1" _A : str = "sent2" if data_args.max_seq_length is None: _A : Any = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." 
) _A : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _A : int = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCamelCase__ : List[Any] ): _A : List[Any] = [[context] * 4 for context in examples[context_name]] _A : Any = examples[question_header_name] _A : Union[str, Any] = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out _A : Dict = list(chain(*UpperCamelCase__ ) ) _A : List[Any] = list(chain(*UpperCamelCase__ ) ) # Tokenize _A : str = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _A : Optional[int] = raw_datasets["train"] if data_args.max_train_samples is not None: _A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) _A : Any = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _A : Optional[int] = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _A : Optional[int] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) _A : Dict = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _A : List[str] = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator _A : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ : Tuple ): _A , _A : List[str] = eval_predictions _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: _A : Any = None if training_args.resume_from_checkpoint is not None: _A : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : int = last_checkpoint _A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Tuple = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) _A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) _A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) _A : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
11
1
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    # Load the pretrained txt2img unCLIP pipeline whose components are reused below.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # The image-variation pipeline conditions on an image encoder instead of a text prior.
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
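Once the converted pipeline has been written to --dump_path, it can be reloaded like any other diffusers pipeline. A minimal usage sketch, not part of the conversion script above; the './unclip-image-variation' path and the blank PIL image are placeholders:

from diffusers import UnCLIPImageVariationPipeline
from PIL import Image

# Reload the converted image-variation pipeline from the directory written by the script above.
pipe = UnCLIPImageVariationPipeline.from_pretrained('./unclip-image-variation')

# Any RGB PIL image can serve as the conditioning input; this blank image is only a placeholder.
init_image = Image.new('RGB', (256, 256))
variations = pipe(init_image).images
variations[0].save('variation.png')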
11
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
11
1
encode_dict = {
    'a': 'AAAAA',
    'b': 'AAAAB',
    'c': 'AAABA',
    'd': 'AAABB',
    'e': 'AABAA',
    'f': 'AABAB',
    'g': 'AABBA',
    'h': 'AABBB',
    'i': 'ABAAA',
    'j': 'BBBAA',
    'k': 'ABAAB',
    'l': 'ABABA',
    'm': 'ABABB',
    'n': 'ABBAA',
    'o': 'ABBAB',
    'p': 'ABBBA',
    'q': 'ABBBB',
    'r': 'BAAAA',
    's': 'BAAAB',
    't': 'BAABA',
    'u': 'BAABB',
    'v': 'BBBAB',
    'w': 'BABAA',
    'x': 'BABAB',
    'y': 'BABBA',
    'z': 'BABBB',
    ' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word (letters and spaces only) with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string made up of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Each group of five symbols maps back to one letter.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
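With the names restored, encode and decode are inverses for lowercase letters and spaces. A small round-trip check (the sample phrase is arbitrary):

# Round-trip check: decoding an encoded phrase should return the original text.
plaintext = "hello world"
ciphertext = encode(plaintext)
# ciphertext == "AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB"
assert decode(ciphertext) == plaintext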
11
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
11
1
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger('transformers.models.speecht5') lowerCAmelCase__ = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } lowerCAmelCase__ = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } lowerCAmelCase__ = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } lowerCAmelCase__ = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } lowerCAmelCase__ = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } lowerCAmelCase__ = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } lowerCAmelCase__ = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 
'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } lowerCAmelCase__ = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } lowerCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } lowerCAmelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } lowerCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } lowerCAmelCase__ = [] lowerCAmelCase__ = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] lowerCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] lowerCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] lowerCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ): for attribute in key.split("." ): _A : Tuple = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: _A : str = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: _A : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": _A : Optional[int] = value elif weight_type == "weight_g": _A : str = value elif weight_type == "weight_v": _A : Optional[int] = value elif weight_type == "bias": _A : Union[str, Any] = value elif weight_type == "running_mean": _A : Any = value elif weight_type == "running_var": _A : List[str] = value elif weight_type == "num_batches_tracked": _A : Any = value else: _A : List[str] = value logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _A , _A : Optional[int] = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): _A : List[str] = [] if task == "s2t": _A : List[str] = hf_model.speechta.encoder.prenet.feature_encoder _A : int = MAPPING_S2T _A : List[Any] = IGNORE_KEYS_S2T elif task == "t2s": _A : Tuple = None _A : Optional[Any] = MAPPING_T2S _A : Any = IGNORE_KEYS_T2S elif task == "s2s": _A : int = hf_model.speechta.encoder.prenet.feature_encoder _A : Tuple = MAPPING_S2S _A : str = IGNORE_KEYS_S2S else: raise ValueError(f"Unsupported task: {task}" ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase__ , UpperCamelCase__ ): logger.info(f"{name} was ignored" ) continue _A : List[str] = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == "group" , ) _A : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _A , _A : str = key.split(".*." ) if prefix in name and suffix in name: _A : Union[str, Any] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _A : List[Any] = True if "*" in mapped_key: _A : Optional[int] = name.split(UpperCamelCase__ )[0].split("." )[-2] _A : Dict = mapped_key.replace("*" , UpperCamelCase__ ) if "weight_g" in name: _A : List[Any] = "weight_g" elif "weight_v" in name: _A : str = "weight_v" elif "bias" in name: _A : List[Any] = "bias" elif "weight" in name: _A : Dict = "weight" elif "running_mean" in name: _A : Any = "running_mean" elif "running_var" in name: _A : Optional[int] = "running_var" elif "num_batches_tracked" in name: _A : Optional[int] = "num_batches_tracked" else: _A : Optional[int] = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(f"Unused weights: {unused_weights}" ) def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : str ): _A : List[str] = full_name.split("conv_layers." )[-1] _A : int = name.split("." 
) _A : Any = int(items[0] ) _A : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _A : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _A : Optional[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) _A : int = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) _A : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , ): if config_path is not None: _A : List[Any] = SpeechTaConfig.from_pretrained(UpperCamelCase__ ) else: _A : Optional[int] = SpeechTaConfig() if task == "s2t": _A : Tuple = config.max_text_positions _A : Optional[Any] = SpeechTaForSpeechToText(UpperCamelCase__ ) elif task == "t2s": _A : Optional[Any] = 1876 _A : List[Any] = 600 _A : Optional[Any] = config.max_speech_positions _A : List[Any] = SpeechTaForTextToSpeech(UpperCamelCase__ ) elif task == "s2s": _A : Optional[int] = 1876 _A : str = config.max_speech_positions _A : Optional[int] = SpeechTaForSpeechToSpeech(UpperCamelCase__ ) else: raise ValueError(f"Unknown task name: {task}" ) if vocab_path: _A : Dict = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _A : Dict = AddedToken("<mask>" , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _A : Dict = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) _A : int = SpeechTaFeatureExtractor() _A : List[str] = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) _A : Union[str, Any] = torch.load(UpperCamelCase__ ) recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase__ , UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if repo_id: print("Pushing to the hub..." 
) processor.push_to_hub(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) lowerCAmelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
11
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): _A : Optional[int] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Optional[int] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase) _A : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Dict: _A : int = "sgugger/tiny-distilbert-classification" _A : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = "sshleifer/tiny-gpt2" _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase) _A : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def _lowerCamelCase ( self) -> int: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Any = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Any: _A : Union[str, Any] = "sshleifer/tiny-gpt2" _A : Any = AutoConfig.from_pretrained(__lowerCamelCase) # set architectures equal to `None` _A : Dict = None _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : List[Any] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : List[Any] = PyTorchBenchmark(__lowerCamelCase) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> str: _A : List[str] = "sshleifer/tiny-gpt2" _A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : Tuple = "sshleifer/tinier_bart" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> str: _A : List[Any] = "sshleifer/tiny-gpt2" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> int: _A : int = "sshleifer/tinier_bart" _A : str = AutoConfig.from_pretrained(__lowerCamelCase) _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> Dict: _A : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , 
"inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists()) def _lowerCamelCase ( self) -> int: _A : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase): self.assertTrue(hasattr(__lowerCamelCase , "sequential")) self.assertTrue(hasattr(__lowerCamelCase , "cumulative")) self.assertTrue(hasattr(__lowerCamelCase , "current")) self.assertTrue(hasattr(__lowerCamelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : Optional[int] = PyTorchBenchmark(__lowerCamelCase) _A : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
11
1
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) lowerCAmelCase__ = 'bert-base-cased' lowerCAmelCase__ = 'fp16' lowerCAmelCase__ = 'bf16' lowerCAmelCase__ = [FPaa, BFaa] @require_fsdp @require_cuda class lowerCAmelCase__ ( a): '''simple docstring''' def _lowerCamelCase ( self) -> Optional[int]: super().setUp() _A : int = dict( ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , ) def _lowerCamelCase ( self) -> List[Any]: from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCamelCase): _A : List[Any] = self.dist_env.copy() _A : Tuple = F"{i + 1}" _A : List[str] = strategy with mockenv_context(**__lowerCamelCase): _A : List[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1)) def _lowerCamelCase ( self) -> str: from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCamelCase): _A : Union[str, Any] = self.dist_env.copy() _A : int = prefetch_policy with mockenv_context(**__lowerCamelCase): _A : Union[str, Any] = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1)) def _lowerCamelCase ( self) -> List[str]: from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCamelCase): _A : List[Any] = self.dist_env.copy() _A : Optional[int] = state_dict_type with mockenv_context(**__lowerCamelCase): _A : Dict = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1)) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only) def _lowerCamelCase ( self) -> List[str]: _A : Optional[Any] = AutoModel.from_pretrained(__lowerCamelCase) for policy in FSDP_AUTO_WRAP_POLICY: _A : Tuple = self.dist_env.copy() _A : Any = policy if policy == "TRANSFORMER_BASED_WRAP": _A : int = "BertLayer" elif policy == "SIZE_BASED_WRAP": _A : str = "2000" with mockenv_context(**__lowerCamelCase): _A : Tuple = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy) _A : Optional[int] = self.dist_env.copy() _A : str = "TRANSFORMER_BASED_WRAP" _A : Union[str, Any] = "T5Layer" with mockenv_context(**__lowerCamelCase): _A : Union[str, Any] = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCamelCase) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase) self.assertTrue("Could not find the 
transformer layer class to wrap in the model." in str(cm.exception)) _A : Tuple = self.dist_env.copy() _A : Optional[Any] = "SIZE_BASED_WRAP" _A : int = "0" with mockenv_context(**__lowerCamelCase): _A : Any = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase) self.assertIsNone(fsdp_plugin.auto_wrap_policy) def _lowerCamelCase ( self) -> List[Any]: from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: _A : Optional[int] = self.dist_env.copy() _A : List[str] = mp_dtype with mockenv_context(**__lowerCamelCase): _A : Optional[Any] = Accelerator() if mp_dtype == "fp16": _A : Union[str, Any] = torch.floataa elif mp_dtype == "bf16": _A : Optional[Any] = torch.bfloataa _A : List[Any] = MixedPrecision(param_dtype=__lowerCamelCase , reduce_dtype=__lowerCamelCase , buffer_dtype=__lowerCamelCase) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __lowerCamelCase) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __lowerCamelCase)) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler) AcceleratorState._reset_state(__lowerCamelCase) def _lowerCamelCase ( self) -> Any: from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: _A : str = self.dist_env.copy() _A : List[Any] = str(__lowerCamelCase).lower() with mockenv_context(**__lowerCamelCase): _A : Optional[int] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__lowerCamelCase)) @require_fsdp @require_multi_gpu @slow class lowerCAmelCase__ ( a): '''simple docstring''' def _lowerCamelCase ( self) -> int: super().setUp() _A : List[str] = 0.8_2 _A : List[Any] = [ "fsdp_shard_grad_op_transformer_based_wrap", "fsdp_full_shard_transformer_based_wrap", ] _A : Tuple = { "multi_gpu_fp16": 3_2_0_0, "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_0_0_0, "fsdp_full_shard_transformer_based_wrap_fp16": 1_9_0_0, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } _A : int = 1_6_0 _A : Optional[Any] = 1_6_0 _A : Optional[Any] = inspect.getfile(accelerate.test_utils) _A : int = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"]) def _lowerCamelCase ( self) -> Optional[int]: _A : Tuple = os.path.join(self.test_scripts_folder , "test_performance.py") _A : Optional[int] = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"] for config in self.performance_configs: _A : Optional[Any] = cmd.copy() for i, strategy in enumerate(__lowerCamelCase): if strategy.lower() in config: cmd_config.append(F"--fsdp_sharding_strategy={i+1}") break if "fp32" in config: cmd_config.append("--mixed_precision=no") else: cmd_config.append("--mixed_precision=fp16") if "cpu_offload" in config: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, F"--output_dir={self.tmpdir}", F"--performance_lower_bound={self.performance_lower_bound}", ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy()) def _lowerCamelCase ( self) -> Dict: _A : List[Any] = os.path.join(self.test_scripts_folder , "test_checkpointing.py") _A : Any = [ "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp", "--mixed_precision=fp16", "--fsdp_transformer_layer_cls_to_wrap=BertLayer", ] for i, strategy in enumerate(__lowerCamelCase): _A : List[Any] = cmd.copy() cmd_config.append(F"--fsdp_sharding_strategy={i+1}") if strategy != "FULL_SHARD": continue _A : List[Any] = len(__lowerCamelCase) for state_dict_type in FSDP_STATE_DICT_TYPE: _A : int = cmd_config[:state_dict_config_index] cmd_config.append(F"--fsdp_state_dict_type={state_dict_type}") cmd_config.extend( [ self.test_file_path, F"--output_dir={self.tmpdir}", "--partial_train_epoch=1", ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy()) _A : Dict = cmd_config[:-1] _A : Any = os.path.join(self.tmpdir , "epoch_0") cmd_config.extend( [ F"--resume_from_checkpoint={resume_from_checkpoint}", ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy()) def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py") _A : List[Any] = [ "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): _A : List[str] = cmd.copy() if "fp16" in spec: cmd_config.extend(["--mixed_precision=fp16"]) else: cmd_config.extend(["--mixed_precision=no"]) if "multi_gpu" in spec: continue else: cmd_config.extend(["--use_fsdp"]) for i, strategy in enumerate(__lowerCamelCase): if strategy.lower() in spec: cmd_config.append(F"--fsdp_sharding_strategy={i+1}") break if "cpu_offload" in spec: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F"--fsdp_auto_wrap_policy={policy}") break if policy == 
"TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, F"--output_dir={self.tmpdir}", F"--peak_memory_upper_bound={peak_mem_upper_bound}", F"--n_train={self.n_train}", F"--n_val={self.n_val}", ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy())
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCAmelCase__ = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = NllbTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it _A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token _A : Optional[int] = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) _A : int = vocab_file _A : Optional[Any] = False if not self.vocab_file else True _A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "eng_Latn" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : List[Any] = src_lang _A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Tuple = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , 
__lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding: _A : Tuple = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> str: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : List[str] = [] _A : Dict = [self.eos_token_id, self.cur_lang_code] else: _A : Tuple = [self.cur_lang_code] _A : Optional[Any] = [self.eos_token_id] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : Tuple = [] _A : Any = [self.eos_token_id, self.cur_lang_code] else: _A : Union[str, Any] = [self.cur_lang_code] _A : str = [self.eos_token_id] _A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : Dict = self.convert_ids_to_tokens(self.suffix_tokens) _A : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
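# Hedged usage sketch (not part of the tokenizer file above): in the upstream
# library the fast tokenizer defined here is exposed as `NllbTokenizerFast`, and
# the translation direction is selected with `src_lang` / `tgt_lang` codes from
# FAIRSEQ_LANGUAGE_CODES. Loading the checkpoint needs network access.
#
#     from transformers import NllbTokenizerFast
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")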
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { 'configuration_efficientformer': [ 'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientFormerConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['EfficientFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientFormerForImageClassification', 'EfficientFormerForImageClassificationWithTeacher', 'EfficientFormerModel', 'EfficientFormerPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFEfficientFormerForImageClassification', 'TFEfficientFormerForImageClassificationWithTeacher', 'TFEfficientFormerModel', 'TFEfficientFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ): _A : Union[str, Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _A : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _A : Dict = format_type def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ): _A : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _A : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ): _A : List[str] = get_format_type_from_alias(UpperCamelCase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCamelCase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


lowerCAmelCase__ = {
    'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
    'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['VisionTextDualEncoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['FlaxVisionTextDualEncoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['TFVisionTextDualEncoderModel']


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
def method_a(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
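# Illustrative check (not part of the original module): with f(x) = x * x as
# defined above, the extended trapezoidal rule should approach the exact
# integral 1/3 on [0, 1] as the number of steps grows. Names follow the
# reconstruction above; treat this as a sketch rather than upstream code.
if __name__ == "__main__":
    for steps in (10.0, 100.0, 1000.0):
        estimate = method_a([0.0, 1.0], steps)
        print(f"steps={steps:>7}  estimate={estimate:.6f}  error={abs(estimate - 1 / 3):.6f}")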
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCAmelCase__ = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['VivitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
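# Hedged usage sketch (not part of the test file above): the save/load round
# trip those tests exercise looks roughly like this with the public
# `transformers` API; the temporary directory is only an example location.
#
#     import tempfile
#     from transformers import GenerationConfig
#
#     config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         config.save_pretrained(tmp_dir)
#         reloaded = GenerationConfig.from_pretrained(tmp_dir)
#     assert reloaded.temperature == 0.7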
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
from __future__ import annotations


def _UpperCAmelCase(value: list[int], weight: list[int], capacity: int):
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
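# Quick usage sketch (illustration only, not part of the original module). The
# helper above is the classic fractional knapsack greedy: items are taken in
# decreasing value/weight ratio and the last item is split. The alias below is
# only for readability in this example.
if __name__ == "__main__":
    fractional_knapsack = _UpperCAmelCase
    value = [60, 100, 120]
    weight = [10, 20, 30]
    total, fractions = fractional_knapsack(value, weight, 50)
    # Expected for this textbook instance: 240.0 with fractions [1, 1, 2/3].
    print(total, fractions)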
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    # Return True if the regexes in qs match some window of the key tuple ks.
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
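# Minimal sketch (illustration only, not part of the upstream file): apply the
# rules above to a tiny dummy parameter tree. Every leaf key must match a rule,
# otherwise the assert in set_partitions fires; the numpy leaf and its shape are
# arbitrary placeholders.
if __name__ == "__main__":
    import numpy as np

    dummy_params = {"transformer": {"wte": {"embedding": np.zeros((4, 2))}}}
    specs = set_partitions(dummy_params)
    print(specs)  # expected: the wte embedding mapped to P("mp", None)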
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
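# Hedged usage sketch (not part of this __init__): the pipelines re-exported
# above are typically combined roughly as follows; the checkpoint ids are only
# examples, downloading them requires network access, and `canny_image` stands
# for a preprocessed conditioning image.
#
#     import torch
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
#     )
#     image = pipe("a photo of a cat", image=canny_image).images[0]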
def _UpperCAmelCase(input_str: str, use_pascal: bool = False):
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
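# Usage sketch (illustration only, not part of the original file): the helper
# above converts snake_case to camelCase by default and to PascalCase when
# use_pascal=True. The alias is only for readability here.
if __name__ == "__main__":
    to_camel = _UpperCAmelCase
    print(to_camel("some_random_string"))                   # someRandomString
    print(to_camel("some_random_string", use_pascal=True))  # SomeRandomString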
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

lowerCAmelCase__ = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class lowerCAmelCase__ ( a):
    '''simple docstring'''

    __SCREAMING_SNAKE_CASE = "beit"

    def __init__( self , __lowerCamelCase=8_1_9_2 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=True , __lowerCamelCase=[3, 5, 7, 1_1] , __lowerCamelCase=[1, 2, 3, 6] , __lowerCamelCase=True , __lowerCamelCase=0.4 , __lowerCamelCase=2_5_6 , __lowerCamelCase=1 , __lowerCamelCase=False , __lowerCamelCase=2_5_5 , **__lowerCamelCase , ) -> str:
        super().__init__(**__lowerCamelCase)

        _A : Union[str, Any] = vocab_size
        _A : Union[str, Any] = hidden_size
        _A : Optional[Any] = num_hidden_layers
        _A : str = num_attention_heads
        _A : List[str] = intermediate_size
        _A : Any = hidden_act
        _A : Tuple = hidden_dropout_prob
        _A : Dict = attention_probs_dropout_prob
        _A : int = initializer_range
        _A : Any = layer_norm_eps
        _A : Optional[int] = image_size
        _A : Optional[int] = patch_size
        _A : Optional[Any] = num_channels
        _A : str = use_mask_token
        _A : int = use_absolute_position_embeddings
        _A : List[Any] = use_relative_position_bias
        _A : List[Any] = use_shared_relative_position_bias
        _A : Optional[Any] = layer_scale_init_value
        _A : Dict = drop_path_rate
        _A : Union[str, Any] = use_mean_pooling
        # decode head attributes (semantic segmentation)
        _A : Dict = out_indices
        _A : Dict = pool_scales
        # auxiliary head attributes (semantic segmentation)
        _A : List[Any] = use_auxiliary_head
        _A : Dict = auxiliary_loss_weight
        _A : str = auxiliary_channels
        _A : str = auxiliary_num_convs
        _A : Optional[Any] = auxiliary_concat_input
        _A : int = semantic_loss_ignore_index


class lowerCAmelCase__ ( a):
    '''simple docstring'''

    __SCREAMING_SNAKE_CASE = version.parse("1.11")

    @property
    def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def _lowerCamelCase ( self) -> float:
        return 1e-4
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


lowerCAmelCase__ = logging.get_logger(__name__)


class lowerCAmelCase__ ( a):
    '''simple docstring'''

    def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , __lowerCamelCase , )
        super().__init__(*__lowerCamelCase , **__lowerCamelCase)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

lowerCAmelCase__ = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}


class lowerCAmelCase__ ( a):
    '''simple docstring'''

    __SCREAMING_SNAKE_CASE = "timesformer"

    def __init__( self , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=8 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-6 , __lowerCamelCase=True , __lowerCamelCase="divided_space_time" , __lowerCamelCase=0 , **__lowerCamelCase , ) -> Tuple:
        super().__init__(**__lowerCamelCase)

        _A : Any = image_size
        _A : Union[str, Any] = patch_size
        _A : Tuple = num_channels
        _A : Dict = num_frames
        _A : int = hidden_size
        _A : Union[str, Any] = num_hidden_layers
        _A : List[Any] = num_attention_heads
        _A : Optional[int] = intermediate_size
        _A : Union[str, Any] = hidden_act
        _A : List[Any] = hidden_dropout_prob
        _A : Any = attention_probs_dropout_prob
        _A : Tuple = initializer_range
        _A : Tuple = layer_norm_eps
        _A : str = qkv_bias
        _A : str = attention_type
        _A : Dict = drop_path_rate
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised for an unknown stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as the returned hidden states are tuples of tensors instead of single tensors
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
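For orientation, the arithmetic behind the expected shapes checked in create_and_check_model, worked out with the tester defaults above (a standalone sketch, not part of the original test file):

# Hedged worked example of the expected sequence length and hidden dimension.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

# After each of the len(depths) - 1 downsampling stages the token count shrinks by 4x...
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16 = 16
# ...while the channel dimension doubles at each downsampling stage.
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))  # 16 * 4 = 64

assert (expected_seq_len, expected_dim) == (16, 64)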
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar('T')


class LRUCache(Generic[T]):
    """Page Replacement Algorithm, Least Recently Used (LRU) Caching."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store and key set; a falsy n means an unbounded cache."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Look up a key and promote it; evict the least recently used key if the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all the elements in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
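A further hedged example of the eviction order implemented by refer, assuming only the LRUCache class above is in scope:

# Referring an existing key moves it to the front instead of evicting anything.
cache: LRUCache[int] = LRUCache(3)
for key in (1, 2, 3, 2, 4):  # referring 2 again promotes it before 4 is inserted
    cache.refer(key)

# 1 is now the least recently used key, so inserting 4 evicted it.
assert str(cache) == "LRUCache(3) => [4, 2, 3]"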
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
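A brief, hedged sketch of what the lazy-module wiring above means for downstream code; the checkpoint name is illustrative and assumes network access plus an installed torch backend:

# Accessing these names triggers the real submodule imports via _LazyModule only now,
# so `import transformers` itself stays cheap.
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")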