Dataset schema (each example below is flattened in column order:
code, code_codestyle, style_context, style_context_codestyle, label):

  code                     string  (length 87 to 55.2k characters)
  code_codestyle           int64   (range 0 to 349)
  style_context            string  (length 135 to 49.1k characters)
  style_context_codestyle  int64   (range 0 to 349)
  label                    int64   (0 or 1)
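A minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library follows; the Hub path "user/code-style-pairs" is a hypothetical placeholder, not the dataset's real name:

from datasets import load_dataset

# Hypothetical Hub path: substitute the actual dataset repository.
ds = load_dataset("user/code-style-pairs", split="train")

example = ds[0]
print(example["code"][:200])               # the code snippet (string)
print(example["code_codestyle"])           # style-class id of the snippet (0-349)
print(example["style_context"][:200])      # the paired context snippet
print(example["style_context_codestyle"])  # style-class id of the context
print(example["label"])                    # binary label for the pair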
code:

# Count "prize strings": attendance records of a given length with fewer than
# three consecutive late days and fewer than two absences (Project Euler 191).
cache: dict = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
code_codestyle: 47
style_context:

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
style_context_codestyle: 339
label: 0
"""simple docstring""" from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = k_size // 2 __lowercase ,__lowercase : int = mgrid[0 - center : k_size - center, 0 - center : k_size - center] __lowercase : int = 1 / (2 * pi * sigma) * exp(-(square(_UpperCAmelCase ) + square(_UpperCAmelCase )) / (2 * square(_UpperCAmelCase )) ) return g def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase ,__lowercase : Any = image.shape[0], image.shape[1] # dst image height and width __lowercase : Optional[int] = height - k_size + 1 __lowercase : Union[str, Any] = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows __lowercase : List[str] = zeros((dst_height * dst_width, k_size * k_size) ) __lowercase : Any = 0 for i, j in product(range(_UpperCAmelCase ) , range(_UpperCAmelCase ) ): __lowercase : Optional[Any] = ravel(image[i : i + k_size, j : j + k_size] ) __lowercase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) __lowercase : int = gen_gaussian_kernel(_UpperCAmelCase , _UpperCAmelCase ) __lowercase : Union[str, Any] = ravel(_UpperCAmelCase ) # reshape and get the dst image __lowercase : Optional[int] = dot(_UpperCAmelCase , _UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase ) return dst if __name__ == "__main__": # read original image a_ = imread(r'../image_data/lena.jpg') # turn image in gray scale value a_ = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size a_ = gaussian_filter(gray, 3, sigma=1) a_ = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow('gaussian filter with 3x3 mask', gaussianaxa) imshow('gaussian filter with 5x5 mask', gaussianaxa) waitKey()
code_codestyle: 249
style_context:

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the
    # corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
style_context_codestyle: 339
label: 0
"""simple docstring""" a = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' a = [{'type': 'code', 'content': INSTALL_CONTENT}] a = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
code_codestyle: 155
style_context:

# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    # Disables destructive functions so generated code cannot interfere with
    # the test host. This is a guard, not a security sandbox.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
style_context_codestyle: 339
label: 0
code:

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
code_codestyle: 87
style_context:

import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
style_context_codestyle: 339
label: 0
code:

# Project Euler problem 8: largest product of thirteen adjacent digits,
# computed with functools.reduce.
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 298
style_context:

from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
style_context_codestyle: 339
label: 0
code:

# Project Euler problem 8 again, solved with an explicit loop instead of reduce.
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 295
style_context:

import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
style_context_codestyle: 339
label: 0
code:

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
code_codestyle: 63
style_context:

import argparse
import hashlib
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    # download into the current directory by default
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
339
0
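The style-context sample in this row downloads Whisper checkpoints and verifies them against the SHA-256 digest embedded in each URL. A minimal, self-contained sketch of that verification pattern (the file path and digest below are placeholders, not real values):

import hashlib


def sha256_matches(path: str, expected_sha256: str) -> bool:
    # Stream the file in chunks so multi-GB checkpoints never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256


# Hypothetical usage: treat a cached file whose digest is stale as corrupt.
# if not sha256_matches("tiny.en.pt", "d3dd57d3..."):  # truncated placeholder digest
#     re_download()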
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __magic_name__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __magic_name__ = TaTokenizerFast __magic_name__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __magic_name__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
100
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5",
    ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam",
    ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
339
0
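The style-context sample in this row is the datasets AudioFolder builder. A usage sketch, assuming a local directory with one subfolder per class and an installed audio decoding backend (the directory name is hypothetical):

from datasets import load_dataset

# my_audio/dog/1.wav, my_audio/cat/2.wav, ... -> labels inferred from folder names
dataset = load_dataset("audiofolder", data_dir="my_audio")
print(dataset["train"].features)  # includes an Audio feature and a label column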
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
219
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
339
0
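For contrast with the iterative DFS restored above (which pops the last element of the stack), a breadth-first sketch that pops from the front; it assumes the same adjacency-dict shape as G in that sample:

from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    # Same bookkeeping as the DFS above, but the frontier is FIFO (popleft),
    # so vertices are explored level by level instead of branch by branch.
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored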
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
200
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
339
0
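The style-context sample in this row hand-rolls top-k mask filling for CamemBERT. The same result can be obtained from the transformers pipeline API (a sketch; output keys can vary slightly across library versions):

from transformers import pipeline

fill_masker = pipeline("fill-mask", model="camembert-base")
for candidate in fill_masker("Le camembert est <mask> :)", top_k=3):
    print(candidate["token_str"], round(candidate["score"], 3))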
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) __UpperCamelCase : int =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) sd_pipe.set_scheduler('sample_euler' ) __UpperCamelCase : List[str] ='A painting of a squirrel eating a burger' __UpperCamelCase : List[str] =torch.manual_seed(0 ) __UpperCamelCase : Any =sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) __UpperCamelCase : Optional[Any] =output.images __UpperCamelCase : int =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __UpperCamelCase : Union[str, Any] =np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __UpperCamelCase : Union[str, Any] =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) sd_pipe.set_scheduler('sample_euler' ) __UpperCamelCase : List[str] ='A painting of a squirrel eating a burger' __UpperCamelCase : List[Any] =torch.manual_seed(0 ) __UpperCamelCase : Optional[Any] =sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' ) __UpperCamelCase : Optional[Any] =output.images __UpperCamelCase : Dict =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __UpperCamelCase : int =np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[Any] =StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) __UpperCamelCase : List[str] =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) sd_pipe.set_scheduler('sample_dpmpp_2m' ) __UpperCamelCase : List[str] ='A painting of a squirrel eating a burger' __UpperCamelCase : List[str] =torch.manual_seed(0 ) __UpperCamelCase : List[Any] =sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=lowerCamelCase__ , ) __UpperCamelCase : Dict =output.images __UpperCamelCase : Optional[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __UpperCamelCase : Union[str, Any] =np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
71
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if the given number is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
339
0
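A short usage sketch for the 6k ± 1 primality test restored above (reuses its is_prime):

print([n for n in range(50) if is_prime(n)])
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
print(is_prime(2**13 - 1))  # 8191 is a Mersenne prime -> True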
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
47
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
339
0
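The metric class in this row is a thin wrapper over NLTK's GLEU implementation; the score can also be computed directly (the toy tokenized sentences below are illustrative, not taken from the docstring):

from nltk.translate.gleu_score import corpus_gleu, sentence_gleu

hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
reference = ["the", "cat", "is", "on", "the", "mat"]
print(sentence_gleu([reference], hypothesis))
print(corpus_gleu([[reference]], [hypothesis], min_len=1, max_len=4))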
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): __lowercase ,__lowercase : str = coefficient_matrix.shape __lowercase ,__lowercase : Union[str, Any] = constant_matrix.shape if rowsa != colsa: __lowercase : Optional[int] = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(_UpperCAmelCase ) if colsa != 1: __lowercase : str = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(_UpperCAmelCase ) if rowsa != rowsa: __lowercase : Optional[int] = ( '''Coefficient and constant matrices dimensions must be nxn and nx1 but ''' f"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(_UpperCAmelCase ) if len(_UpperCAmelCase ) != rowsa: __lowercase : Dict = ( '''Number of initial values must be equal to number of rows in coefficient ''' f"""matrix but received {len(_UpperCAmelCase )} and {rowsa}""" ) raise ValueError(_UpperCAmelCase ) if iterations <= 0: raise ValueError('''Iterations must be at least 1''' ) __lowercase : Dict = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) __lowercase ,__lowercase : Optional[int] = table.shape strictly_diagonally_dominant(_UpperCAmelCase ) # Iterates the whole matrix for given number of times for _ in range(_UpperCAmelCase ): __lowercase : Optional[Any] = [] for row in range(_UpperCAmelCase ): __lowercase : Union[str, Any] = 0 for col in range(_UpperCAmelCase ): if col == row: __lowercase : Optional[Any] = table[row][col] elif col == cols - 1: __lowercase : Dict = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] __lowercase : List[Any] = (temp + val) / denom new_val.append(_UpperCAmelCase ) __lowercase : List[Any] = new_val return [float(_UpperCAmelCase ) for i in new_val] def __UpperCAmelCase ( __UpperCamelCase ): __lowercase ,__lowercase : Optional[int] = table.shape __lowercase : List[str] = True for i in range(0 , _UpperCAmelCase ): __lowercase : List[Any] = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
249
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
339
0
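A sanity-check sketch for the Jacobi solver restored above (reuses its jacobi_iteration_method): on a strictly diagonally dominant system, the iterates should approach numpy's direct solution.

import numpy as np

coeffs = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
consts = np.array([[2.0], [-6.0], [-4.0]])
approx = jacobi_iteration_method(coeffs, consts, init_val=[0, 0, 0], iterations=50)
print(np.round(approx, 6))
print(np.linalg.solve(coeffs, consts).ravel())  # reference solution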
"""simple docstring""" from collections.abc import Generator from math import sin def lowercase (snake_case__ : bytes ) -> bytes: '''simple docstring''' if len(_UpperCAmelCase ) != 32: raise ValueError("""Input must be of length 32""" ) lowerCAmelCase = B"""""" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase (snake_case__ : int ) -> bytes: '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) lowerCAmelCase = format(_UpperCAmelCase , """08x""" )[-8:] lowerCAmelCase = B"""""" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" ) return little_endian_hex def lowercase (snake_case__ : bytes ) -> bytes: '''simple docstring''' lowerCAmelCase = B"""""" for char in message: bit_string += format(_UpperCAmelCase , """08b""" ).encode("""utf-8""" ) lowerCAmelCase = format(len(_UpperCAmelCase ) , """064b""" ).encode("""utf-8""" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_UpperCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase (snake_case__ : bytes ) -> Generator[list[int], None, None]: '''simple docstring''' if len(_UpperCAmelCase ) % 512 != 0: raise ValueError("""Input must have length that\'s a multiple of 512""" ) for pos in range(0 , len(_UpperCAmelCase ) , 512 ): lowerCAmelCase = bit_string[pos : pos + 512] lowerCAmelCase = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase (snake_case__ : int ) -> int: '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) lowerCAmelCase = format(_UpperCAmelCase , """032b""" ) lowerCAmelCase = """""" for c in i_str: new_str += "1" if c == "0" else "0" return int(_UpperCAmelCase , 2 ) def lowercase (snake_case__ : int , snake_case__ : int ) -> int: '''simple docstring''' return (a + b) % 2**32 def lowercase (snake_case__ : int , snake_case__ : int ) -> int: '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) if shift < 0: raise ValueError("""Shift must be non-negative""" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase (snake_case__ : bytes ) -> bytes: '''simple docstring''' lowerCAmelCase = preprocess(_UpperCAmelCase ) lowerCAmelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states lowerCAmelCase = 0x67_452_301 lowerCAmelCase = 0xef_cda_b89 lowerCAmelCase = 0x98_bad_cfe lowerCAmelCase = 0x10_325_476 lowerCAmelCase = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_UpperCAmelCase ): lowerCAmelCase = aa lowerCAmelCase = ba lowerCAmelCase = ca lowerCAmelCase = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f lowerCAmelCase = d ^ (b & (c ^ d)) lowerCAmelCase = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f lowerCAmelCase = c ^ (d & (b ^ c)) lowerCAmelCase = (5 * i + 1) % 16 elif i <= 47: lowerCAmelCase = b ^ c ^ d lowerCAmelCase = (3 * i + 5) % 16 else: lowerCAmelCase = c ^ (b | not_aa(_UpperCAmelCase )) lowerCAmelCase = (7 * i) % 16 
lowerCAmelCase = (f + a + added_consts[i] + block_words[g]) % 2**32 lowerCAmelCase = d lowerCAmelCase = c lowerCAmelCase = b lowerCAmelCase = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total lowerCAmelCase = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
155
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg is kept for backward compatibility with older configs.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
339
0
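The first sample in this row re-implements MD5 from scratch. A quick cross-check sketch against the standard library (hashlib is the reference implementation; the digest below is a well-known MD5 test vector):

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
print(hashlib.md5(message).hexdigest())
# Expected: 9e107d9d372bb6826bd81d3542a419d6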
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCamelCase = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase_ ( _lowerCamelCase : Optional[Any]): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any]): if args.student_type == "roberta": lowercase__ : List[Any] = False elif args.student_type == "gpt2": lowercase__ : str = False def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int): if args.student_type == "roberta": lowercase__ : List[Any] = False def lowercase_ ( ): lowercase__ : Any = argparse.ArgumentParser(description="Training") parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The output directory (log, checkpoints, parameters, etc.)") parser.add_argument( "--data_file" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=_UpperCAmelCase , choices=["distilbert", "roberta", "gpt2"] , required=_UpperCAmelCase , help="The student type (DistilBERT, RoBERTa)." 
, ) parser.add_argument("--student_config" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Load student initialization checkpoint.") parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_UpperCAmelCase , help="Teacher type (BERT, RoBERTa).") parser.add_argument("--teacher_name" , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The teacher model.") parser.add_argument("--temperature" , default=2.0 , type=_UpperCAmelCase , help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce" , default=0.5 , type=_UpperCAmelCase , help="Linear weight for the distillation loss. Must be >=0.") parser.add_argument( "--alpha_mlm" , default=0.0 , type=_UpperCAmelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=_UpperCAmelCase , help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse" , default=0.0 , type=_UpperCAmelCase , help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos" , default=0.0 , type=_UpperCAmelCase , help="Linear weight of the cosine embedding loss. Must be >=0.") parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.") parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=_UpperCAmelCase , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=_UpperCAmelCase , help="Proportion of tokens to mask out.") parser.add_argument("--word_keep" , default=0.1 , type=_UpperCAmelCase , help="Proportion of tokens to keep.") parser.add_argument("--word_rand" , default=0.1 , type=_UpperCAmelCase , help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing" , default=0.7 , type=_UpperCAmelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=_UpperCAmelCase , help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only." , ) parser.add_argument("--n_epoch" , type=_UpperCAmelCase , default=3 , help="Number of pass on the whole dataset.") parser.add_argument("--batch_size" , type=_UpperCAmelCase , default=5 , help="Batch size (for each process).") parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=_UpperCAmelCase , default=50 , help="Gradient accumulation for larger training batches." 
, ) parser.add_argument("--warmup_prop" , default=0.05 , type=_UpperCAmelCase , help="Linear warmup proportion.") parser.add_argument("--weight_decay" , default=0.0 , type=_UpperCAmelCase , help="Weight decay if we apply some.") parser.add_argument("--learning_rate" , default=5E-4 , type=_UpperCAmelCase , help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon" , default=1E-6 , type=_UpperCAmelCase , help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm" , default=5.0 , type=_UpperCAmelCase , help="Max gradient norm.") parser.add_argument("--initializer_range" , default=0.02 , type=_UpperCAmelCase , help="Random initialization range.") parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_UpperCAmelCase , default="O1" , help=( "For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=_UpperCAmelCase , default=1 , help="Number of GPUs in the node.") parser.add_argument("--local_rank" , type=_UpperCAmelCase , default=-1 , help="Distributed training - Local rank") parser.add_argument("--seed" , type=_UpperCAmelCase , default=56 , help="Random seed") parser.add_argument("--log_interval" , type=_UpperCAmelCase , default=500 , help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval" , type=_UpperCAmelCase , default=4000 , help="Checkpoint interval.") lowercase__ : List[Any] = parser.parse_args() sanity_checks(_UpperCAmelCase) # ARGS # init_gpu_params(_UpperCAmelCase) set_seed(_UpperCAmelCase) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' " itUse `--force` if you want to overwrite it") else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''') # SAVE PARAMS # logger.info(f'''Param: {args}''') with open(os.path.join(args.dump_path , "parameters.json") , "w") as f: json.dump(vars(_UpperCAmelCase) , _UpperCAmelCase , indent=4) git_log(args.dump_path) lowercase__ , lowercase__ , lowercase__ : int = MODEL_CLASSES[args.student_type] lowercase__ , lowercase__ , lowercase__ : str = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowercase__ : Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name) lowercase__ : List[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowercase__ : Optional[int] = tokenizer.all_special_tokens.index(_UpperCAmelCase) lowercase__ : Any = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''') lowercase__ : int = special_tok_ids lowercase__ : int = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file , "rb") as fp: lowercase__ : Union[str, Any] = pickle.load(_UpperCAmelCase) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''') with open(args.token_counts , "rb") as fp: lowercase__ : Dict = pickle.load(_UpperCAmelCase) lowercase__ : str = np.maximum(_UpperCAmelCase , 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowercase__ : Tuple = 0.0 # do not 
predict special tokens lowercase__ : Optional[int] = torch.from_numpy(_UpperCAmelCase) else: lowercase__ : List[str] = None lowercase__ : Optional[int] = LmSeqsDataset(params=_UpperCAmelCase , data=_UpperCAmelCase) logger.info("Data loader created.") # STUDENT # logger.info(f'''Loading student config from {args.student_config}''') lowercase__ : Optional[int] = student_config_class.from_pretrained(args.student_config) lowercase__ : Optional[Any] = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''') lowercase__ : Any = student_model_class.from_pretrained(args.student_pretrained_weights , config=_UpperCAmelCase) else: lowercase__ : Tuple = student_model_class(_UpperCAmelCase) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''') logger.info("Student loaded.") # TEACHER # lowercase__ : Tuple = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_UpperCAmelCase) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''') logger.info(f'''Teacher loaded from {args.teacher_name}.''') # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_UpperCAmelCase , _UpperCAmelCase) if args.freeze_token_type_embds: freeze_token_type_embeddings(_UpperCAmelCase , _UpperCAmelCase) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowercase__ : Any = Distiller( params=_UpperCAmelCase , dataset=_UpperCAmelCase , token_probs=_UpperCAmelCase , student=_UpperCAmelCase , teacher=_UpperCAmelCase) distiller.train() logger.info("Let\'s go get some drinks.") if __name__ == "__main__": main()
87
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform Luhn-algorithm validation for a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number and print the reason if invalid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
339
0
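A quick property-style check for the Luhn validator restored above (reuses its luhn_validation): changing a single digit of a valid number must break the checksum.

valid = "4111111111111111"  # a standard Luhn-valid test number
assert luhn_validation(valid)
tampered = valid[:-1] + "2"  # flip the check digit
assert not luhn_validation(tampered)
print("Luhn sanity checks passed")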
'''simple docstring''' import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch _lowerCAmelCase = True except ImportError: _lowerCAmelCase = False try: from torch.hub import _get_torch_home _lowerCAmelCase = _get_torch_home() except ImportError: _lowerCAmelCase = os.path.expanduser( os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch''')) ) _lowerCAmelCase = os.path.join(torch_cache_home, '''transformers''') _lowerCAmelCase = '''https://cdn.huggingface.co''' _lowerCAmelCase = '''https://s3.amazonaws.com/models.huggingface.co/bert''' _lowerCAmelCase = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1]) _lowerCAmelCase = os.path.join(PATH, '''config.yaml''') _lowerCAmelCase = os.path.join(PATH, '''attributes.txt''') _lowerCAmelCase = os.path.join(PATH, '''objects.txt''') _lowerCAmelCase = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path) _lowerCAmelCase = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE) _lowerCAmelCase = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE) _lowerCAmelCase = '''pytorch_model.bin''' _lowerCAmelCase = '''config.yaml''' def __lowerCAmelCase ( snake_case__=OBJECTS , snake_case__=ATTRIBUTES ): __UpperCamelCase : str = [] with open(_UpperCAmelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) __UpperCamelCase : str = [] with open(_UpperCAmelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Any = OrderedDict() with open(_UpperCAmelCase , "rb" ) as f: __UpperCamelCase : List[str] = pkl.load(_UpperCAmelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): __UpperCamelCase : Dict = ckp.pop(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , np.ndarray ): __UpperCamelCase : Tuple = torch.tensor(_UpperCAmelCase ) else: assert isinstance(_UpperCAmelCase , torch.tensor ), type(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = v return r class A : '''simple docstring''' A = {} def __init__(self , _UpperCAmelCase , _UpperCAmelCase = "root" , _UpperCAmelCase=0 ) -> Tuple: __UpperCamelCase : Optional[int] = name __UpperCamelCase : Any = level __UpperCamelCase : List[Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() __UpperCamelCase : int = copy.deepcopy(_UpperCAmelCase ) __UpperCamelCase : str = copy.deepcopy(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __UpperCamelCase : Any = Config(_UpperCAmelCase , name=_UpperCAmelCase , level=level + 1 ) __UpperCamelCase : Any = v setattr(self , _UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Dict = d def __repr__(self ) -> Optional[int]: return str(list((self._pointer.keys()) ) ) def __setattr__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Any = val __UpperCamelCase : Any = val __UpperCamelCase : Optional[Any] = key.split("." 
) __UpperCamelCase : int = len(_UpperCAmelCase ) - 1 __UpperCamelCase : List[str] = self._pointer if len(_UpperCAmelCase ) > 1: for i, l in enumerate(_UpperCAmelCase ): if hasattr(self , _UpperCAmelCase ) and isinstance(getattr(self , _UpperCAmelCase ) , _UpperCAmelCase ): setattr(getattr(self , _UpperCAmelCase ) , ".".join(levels[i:] ) , _UpperCAmelCase ) if l == last_level: __UpperCamelCase : List[Any] = val else: __UpperCamelCase : Union[str, Any] = pointer[l] def a_ (self ) -> Tuple: return self._pointer def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> str: with open(f"{file_name}" , "w" ) as stream: dump(_UpperCAmelCase , _UpperCAmelCase ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> str: with open(f"{file_name}" , "w" ) as stream: json.dump(_UpperCAmelCase , _UpperCAmelCase ) @staticmethod def a_ (_UpperCAmelCase ) -> int: with open(_UpperCAmelCase ) as stream: __UpperCamelCase : int = load(_UpperCAmelCase , Loader=_UpperCAmelCase ) return data def __str__(self ) -> str: __UpperCamelCase : Union[str, Any] = " " if self._name != "root": __UpperCamelCase : List[str] = f"{t * (self._level-1)}{self._name}:\n" else: __UpperCamelCase : List[str] = "" __UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): r += f"{t * (self._level)}{v}\n" self._level += 1 else: r += f"{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n" __UpperCamelCase : int = level return r[:-1] @classmethod def a_ (cls , _UpperCAmelCase , **_UpperCAmelCase ) -> List[str]: __UpperCamelCase , __UpperCamelCase : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) return cls(_UpperCAmelCase ) @classmethod def a_ (cls , _UpperCAmelCase , **_UpperCAmelCase ) -> Dict: __UpperCamelCase : Tuple = kwargs.pop("cache_dir" , _UpperCAmelCase ) __UpperCamelCase : Optional[int] = kwargs.pop("force_download" , _UpperCAmelCase ) __UpperCamelCase : Optional[int] = kwargs.pop("resume_download" , _UpperCAmelCase ) __UpperCamelCase : List[Any] = kwargs.pop("proxies" , _UpperCAmelCase ) __UpperCamelCase : List[str] = kwargs.pop("local_files_only" , _UpperCAmelCase ) if os.path.isdir(_UpperCAmelCase ): __UpperCamelCase : Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ): __UpperCamelCase : str = pretrained_model_name_or_path else: __UpperCamelCase : int = hf_bucket_url(_UpperCAmelCase , filename=_UpperCAmelCase , use_cdn=_UpperCAmelCase ) try: # Load from URL or cache if already cached __UpperCamelCase : List[str] = cached_path( _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError __UpperCamelCase : List[Any] = Config.load_yaml(_UpperCAmelCase ) except EnvironmentError: __UpperCamelCase : Any = "Can\'t load config for" raise EnvironmentError(_UpperCAmelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(_UpperCAmelCase ), kwargs def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Optional[Any] = torch.load("dump.pt" , map_location=in_tensor.device ) __UpperCamelCase : List[str] = in_tensor.numpy() __UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, 
:5] ) assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ), ( F"{sum([1 for x in np.isclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %" " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Any = urlparse(_UpperCAmelCase ) return parsed.scheme in ("http", "https") def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=True ): __UpperCamelCase : Dict = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX __UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F"{endpoint}/{model_id}-{filename}" else: return F"{endpoint}/{model_id}/{filename}" def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=0 , snake_case__=None , ): __UpperCamelCase : Optional[Any] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): ua += "; " + "; ".join("{}/{}".format(_UpperCAmelCase , _UpperCAmelCase ) for k, v in user_agent.items() ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): ua += "; " + user_agent __UpperCamelCase : Union[str, Any] = {"user-agent": ua} if resume_size > 0: __UpperCamelCase : List[str] = "bytes=%d-" % (resume_size,) __UpperCamelCase : Union[str, Any] = requests.get(_UpperCAmelCase , stream=_UpperCAmelCase , proxies=_UpperCAmelCase , headers=_UpperCAmelCase ) if response.status_code == 416: # Range not satisfiable return __UpperCamelCase : Union[str, Any] = response.headers.get("Content-Length" ) __UpperCamelCase : Any = resume_size + int(_UpperCAmelCase ) if content_length is not None else None __UpperCamelCase : Any = tqdm( unit="B" , unit_scale=_UpperCAmelCase , total=_UpperCAmelCase , initial=_UpperCAmelCase , desc="Downloading" , ) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(_UpperCAmelCase ) ) temp_file.write(_UpperCAmelCase ) progress.close() def __lowerCAmelCase ( snake_case__ , snake_case__=None , snake_case__=False , snake_case__=None , snake_case__=10 , snake_case__=False , snake_case__=None , snake_case__=False , ): if cache_dir is None: __UpperCamelCase : int = TRANSFORMERS_CACHE if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __UpperCamelCase : int = str(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) __UpperCamelCase : List[str] = None if not local_files_only: try: __UpperCamelCase : Tuple = requests.head(_UpperCAmelCase , allow_redirects=_UpperCAmelCase , proxies=_UpperCAmelCase , timeout=_UpperCAmelCase ) if response.status_code == 200: __UpperCamelCase : List[Any] = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass __UpperCamelCase : Optional[int] = url_to_filename(_UpperCAmelCase , _UpperCAmelCase ) # get cache path to put the file __UpperCamelCase : Any = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. 
# try to get the last downloaded one if etag is None: if os.path.exists(_UpperCAmelCase ): return cache_path else: __UpperCamelCase : Union[str, Any] = [ file for file in fnmatch.filter(os.listdir(_UpperCAmelCase ) , filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(_UpperCAmelCase ) > 0: return os.path.join(_UpperCAmelCase , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set \'local_files_only\'" " to False." ) return None # From now on, etag is not None. if os.path.exists(_UpperCAmelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. __UpperCamelCase : List[str] = cache_path + ".lock" with FileLock(_UpperCAmelCase ): # If the download just completed while the lock was activated. if os.path.exists(_UpperCAmelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: __UpperCamelCase : Optional[Any] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(_UpperCAmelCase , "a+b" ) as f: yield f __UpperCamelCase : Any = _resumable_file_manager if os.path.exists(_UpperCAmelCase ): __UpperCamelCase : Tuple = os.stat(_UpperCAmelCase ).st_size else: __UpperCamelCase : int = 0 else: __UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile , dir=_UpperCAmelCase , delete=_UpperCAmelCase ) __UpperCamelCase : List[str] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" , _UpperCAmelCase , temp_file.name , ) http_get( _UpperCAmelCase , _UpperCAmelCase , proxies=_UpperCAmelCase , resume_size=_UpperCAmelCase , user_agent=_UpperCAmelCase , ) os.replace(temp_file.name , _UpperCAmelCase ) __UpperCamelCase : Optional[int] = {"url": url, "etag": etag} __UpperCamelCase : List[Any] = cache_path + ".json" with open(_UpperCAmelCase , "w" ) as meta_file: json.dump(_UpperCAmelCase , _UpperCAmelCase ) return cache_path def __lowerCAmelCase ( snake_case__ , snake_case__=None ): __UpperCamelCase : int = url.encode("utf-8" ) __UpperCamelCase : str = shaaaa(_UpperCAmelCase ) __UpperCamelCase : List[Any] = url_hash.hexdigest() if etag: __UpperCamelCase : List[Any] = etag.encode("utf-8" ) __UpperCamelCase : int = shaaaa(_UpperCAmelCase ) filename += "." 
+ etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __lowerCAmelCase ( snake_case__ , snake_case__=None , snake_case__=False , snake_case__=None , snake_case__=False , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=False , ): if cache_dir is None: __UpperCamelCase : Tuple = TRANSFORMERS_CACHE if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __UpperCamelCase : Dict = str(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __UpperCamelCase : Optional[int] = str(_UpperCAmelCase ) if is_remote_url(_UpperCAmelCase ): # URL, so get it from the cache (downloading if necessary) __UpperCamelCase : Dict = get_from_cache( _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , user_agent=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) elif os.path.exists(_UpperCAmelCase ): # File, and it exists. __UpperCamelCase : Dict = url_or_filename elif urlparse(_UpperCAmelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(_UpperCAmelCase ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(_UpperCAmelCase ) ) if extract_compressed_file: if not is_zipfile(_UpperCAmelCase ) and not tarfile.is_tarfile(_UpperCAmelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" __UpperCamelCase , __UpperCamelCase : List[str] = os.path.split(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = output_file.replace("." , "-" ) + "-extracted" __UpperCamelCase : Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isdir(_UpperCAmelCase ) and os.listdir(_UpperCAmelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions __UpperCamelCase : Dict = output_path + ".lock" with FileLock(_UpperCAmelCase ): shutil.rmtree(_UpperCAmelCase , ignore_errors=_UpperCAmelCase ) os.makedirs(_UpperCAmelCase ) if is_zipfile(_UpperCAmelCase ): with ZipFile(_UpperCAmelCase , "r" ) as zip_file: zip_file.extractall(_UpperCAmelCase ) zip_file.close() elif tarfile.is_tarfile(_UpperCAmelCase ): __UpperCamelCase : List[Any] = tarfile.open(_UpperCAmelCase ) tar_file.extractall(_UpperCAmelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(_UpperCAmelCase ) ) return output_path_extracted return output_path def __lowerCAmelCase ( snake_case__ , snake_case__="," ): assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ): with open(_UpperCAmelCase ) as f: __UpperCamelCase : Union[str, Any] = eval(f.read() ) else: __UpperCamelCase : Dict = requests.get(_UpperCAmelCase ) try: __UpperCamelCase : Tuple = requests.json() except Exception: __UpperCamelCase : List[str] = req.content.decode() assert data is not None, "could not connect" try: __UpperCamelCase : Dict = eval(_UpperCAmelCase ) except Exception: __UpperCamelCase : Dict = data.split("\n" ) req.close() return data def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Optional[int] = requests.get(_UpperCAmelCase ) __UpperCamelCase : Any = np.array(Image.open(BytesIO(response.content ) ) ) return img def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : str = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(_UpperCAmelCase ) with open(_UpperCAmelCase , "rb" ) as 
stream: __UpperCamelCase : List[Any] = pkl.load(_UpperCAmelCase ) __UpperCamelCase : Tuple = weights.pop("model" ) __UpperCamelCase : int = {} for k, v in model.items(): __UpperCamelCase : Any = torch.from_numpy(_UpperCAmelCase ) if "running_var" in k: __UpperCamelCase : int = torch.tensor([0] ) __UpperCamelCase : Tuple = k.replace("running_var" , "num_batches_tracked" ) __UpperCamelCase : Dict = zero return new def __lowerCAmelCase ( ): print(F"{os.path.abspath(os.path.join(_UpperCAmelCase , os.pardir ) )}/demo.ipynb" ) def __lowerCAmelCase ( snake_case__ , snake_case__="RGB" ): assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ): __UpperCamelCase : List[Any] = cva.imread(_UpperCAmelCase ) else: __UpperCamelCase : int = get_image_from_url(_UpperCAmelCase ) assert img is not None, F"could not connect to: {im}" __UpperCamelCase : Optional[Any] = cva.cvtColor(_UpperCAmelCase , cva.COLOR_BGR2RGB ) if input_format == "RGB": __UpperCamelCase : str = img[:, :, ::-1] return img def __lowerCAmelCase ( snake_case__ , snake_case__=1 ): return (images[i : i + batch] for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ))
298
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
339
0
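# A worked small-scale version of the adjacent-digit scan above, assuming the
# same reduce-based product but a window of 4 instead of 13; the digit string
# below is a hypothetical illustration:
from functools import reduce

digits = "2694358"
best = max(
    int(reduce(lambda x, y: str(int(x) * int(y)), digits[i : i + 4]))
    for i in range(len(digits) - 3)
)
assert best == 648  # the window "6943": 6 * 9 * 4 * 3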
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline

config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
295
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max(
            (abs(augmented[row2][col]), row2) for row2 in range(row, size)
        )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through (1, y0), (2, y1), ... and return it as a function."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from the question."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the best-fit polynomials for u(n)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
339
0
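# A quick check of the interpolation sample above, using the worked example
# from Project Euler 101 for u(n) = n**3: the quadratic fitted through
# 1, 8, 27 first goes wrong at n = 4, where it predicts 58 rather than 64:
op3 = interpolate([1, 8, 27])
assert op3(3) == 27
assert op3(4) == 58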
import math


def sieve(n: int) -> list[int]:
    """Return all primes up to and including n, using a segmented sieve."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
63
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of nums is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 1])
    False
    """
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
0
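# Sanity check for the segmented sieve above, assuming it returns every prime
# up to and including its argument:
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]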
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __magic_name__ = 16 __magic_name__ = 32 def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = 16 ): __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __SCREAMING_SNAKE_CASE = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCamelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCamelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( _UpperCAmelCase , padding="""longest""" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["""train"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["""validation"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __magic_name__ = mocked_dataloaders # noqa: F811 def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _UpperCAmelCase ) == "1": __SCREAMING_SNAKE_CASE = 2 # New Code # __SCREAMING_SNAKE_CASE = int(args.gradient_accumulation_steps ) # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["""lr"""] __SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] ) __SCREAMING_SNAKE_CASE = int(config["""seed"""] ) __SCREAMING_SNAKE_CASE = int(config["""batch_size"""] ) __SCREAMING_SNAKE_CASE = evaluate.load("""glue""" , """mrpc""" ) set_seed(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_UpperCAmelCase ): __SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = output.loss accelerator.backward(_UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , _UpperCAmelCase ) def _lowerCAmelCase ( ): __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=_UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
100
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to its minimal roman numeral form."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting the file in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
339
0
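# Round-trip checks for the two roman-numeral converters above; 1990 exercises
# both the subtractive "CM" and "XC" forms:
assert parse_roman_numerals("XIV") == 14
assert parse_roman_numerals("MCMXC") == 1990
assert generate_roman_numerals(1990) == "MCMXC"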
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
219
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count text for a Google Scholar lookup."""
    soup = BeautifulSoup(
        requests.get(base_url, params=params).content, "html.parser"
    )
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
0
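# The scholar scraper above needs a live request; this self-contained sketch
# exercises the same selector logic against a canned snippet (the markup is
# hypothetical, mirroring only the class names the scraper relies on):
from bs4 import BeautifulSoup

html = (
    '<div class="gs_ri"><div class="gs_fl">'
    '<a href="#">Related articles</a><a href="#">All versions</a>'
    '<a href="#">Cited by 42</a></div></div>'
)
soup = BeautifulSoup(html, "html.parser")
div = soup.find("div", attrs={"class": "gs_ri"})
anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
assert anchors[2].get_text() == "Cited by 42"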
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowercase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=16 , __snake_case=2 , __snake_case=0.02 , __snake_case=4 , ): _SCREAMING_SNAKE_CASE : Any = parent _SCREAMING_SNAKE_CASE : List[Any] = batch_size _SCREAMING_SNAKE_CASE : Optional[Any] = seq_length _SCREAMING_SNAKE_CASE : Tuple = is_training _SCREAMING_SNAKE_CASE : List[Any] = use_attention_mask _SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids _SCREAMING_SNAKE_CASE : Optional[Any] = use_labels _SCREAMING_SNAKE_CASE : Dict = vocab_size _SCREAMING_SNAKE_CASE : Dict = hidden_size _SCREAMING_SNAKE_CASE : Dict = num_hidden_layers _SCREAMING_SNAKE_CASE : int = num_attention_heads _SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size _SCREAMING_SNAKE_CASE : List[Any] = hidden_act _SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob _SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Dict = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size _SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size _SCREAMING_SNAKE_CASE : Any = initializer_range _SCREAMING_SNAKE_CASE : Any = num_choices def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : Tuple = None if self.use_attention_mask: _SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE : List[str] = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE : Optional[int] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs _SCREAMING_SNAKE_CASE : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowercase__ ( _snake_case , 
unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = True A_ : Union[str, Any] = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Any = FlaxRoFormerModelTester(self ) @slow def UpperCAmelCase_ ( self ): for model_class_name in self.all_model_classes: _SCREAMING_SNAKE_CASE : int = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__snake_case ) _SCREAMING_SNAKE_CASE : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__snake_case ) @require_flax class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : Tuple = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) _SCREAMING_SNAKE_CASE : str = jnp.array([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE : Dict = model(__snake_case )[0] _SCREAMING_SNAKE_CASE : Union[str, Any] = 5_0000 _SCREAMING_SNAKE_CASE : Any = (1, 6, vocab_size) self.assertEqual(output.shape , __snake_case ) _SCREAMING_SNAKE_CASE : Dict = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
200
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
0
A_ :int = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
71
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings for the remaining days, given the current counters."""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Count the 30-day prize strings from Project Euler 191."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
339
0
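# Small cases for the prize-string recursion above: for 1-4 school days the
# counts of strings with at most one absence and no three consecutive lates
# are the known values 3, 8, 19, 43:
assert [solution(n) for n in (1, 2, 3, 4)] == [3, 8, 19, 43]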
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
            "want", "##want", "##ed", "wa", "un", "runn", "##ing",
            ",", "low", "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
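For readers who want to see the processor pattern these tests exercise outside of a test harness, here is a minimal usage sketch. The checkpoint name is an assumption (any ALIGN checkpoint would do), the snippet downloads weights from the Hub, and return_tensors="pt" requires PyTorch to be installed.

import numpy as np
from PIL import Image
from transformers import AlignProcessor

# "kakaobrain/align-base" is assumed to be available on the Hugging Face Hub;
# substitute a local path if you have the files on disk.
processor = AlignProcessor.from_pretrained("kakaobrain/align-base")

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")

# One call produces both the text and the vision features, matching the keys
# asserted in test_processor above.
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']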
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): if not numbers: return 0 if not isinstance(_UpperCAmelCase , (list, tuple) ) or not all( isinstance(_UpperCAmelCase , _UpperCAmelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) __lowercase : List[str] = numbers[0] for i in range(1 , len(_UpperCAmelCase ) ): # update the maximum and minimum subarray products __lowercase : Union[str, Any] = numbers[i] if number < 0: __lowercase ,__lowercase : Optional[int] = min_till_now, max_till_now __lowercase : Optional[int] = max(_UpperCAmelCase , max_till_now * number ) __lowercase : Tuple = min(_UpperCAmelCase , min_till_now * number ) # update the maximum product found till now __lowercase : List[Any] = max(_UpperCAmelCase , _UpperCAmelCase ) return max_prod
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
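The `#`-delimited table encoding handled by the inner `_convert_table_text_to_pandas` helper above is easy to miss in the flow of the script, so here is the same conversion in isolation. The table content is made up for illustration; only the format (newline-separated rows, `#`-separated cells, header in the first row) comes from the script.

import pandas as pd

table_text = "year#city\n2008#beijing\n2012#london"

# Same steps as _convert_table_text_to_pandas: split rows, split cells,
# treat the first row as the column header.
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table)
#    year     city
# 0  2008  beijing
# 1  2012   london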
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class SCREAMING_SNAKE_CASE__ ( nn.Module ): _a = 42 _a = 42 _a = 0.0 _a = 1 _a = 1 _a = True _a = False _a = False _a = False _a = jnp.floataa def __lowercase ( self : Union[str, Any] ): lowerCAmelCase = [] lowerCAmelCase = [] for i in range(self.num_layers ): lowerCAmelCase = self.in_channels if i == 0 else self.out_channels lowerCAmelCase = FlaxResnetBlockaD( in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) lowerCAmelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase ) lowerCAmelCase = resnets lowerCAmelCase = attentions if self.add_downsample: lowerCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=True ): lowerCAmelCase = () for resnet, attn in zip(self.resnets , self.attentions ): lowerCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) lowerCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) output_states += (hidden_states,) if self.add_downsample: lowerCAmelCase = self.downsamplers_a(lowerCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): _a = 42 _a = 42 _a = 0.0 _a = 1 _a = True _a = jnp.floataa def __lowercase ( self : List[str] ): lowerCAmelCase = [] for i in range(self.num_layers ): lowerCAmelCase = self.in_channels if i == 0 else self.out_channels lowerCAmelCase = FlaxResnetBlockaD( in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) lowerCAmelCase = resnets if self.add_downsample: lowerCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]=True ): lowerCAmelCase = () for resnet in self.resnets: lowerCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) output_states += (hidden_states,) if self.add_downsample: lowerCAmelCase = self.downsamplers_a(lowerCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): _a = 42 _a = 42 _a = 42 _a = 0.0 _a = 1 _a = 1 _a = True _a = False _a = False _a = False _a = jnp.floataa def __lowercase ( self : Tuple ): lowerCAmelCase = [] lowerCAmelCase = [] for i in range(self.num_layers ): lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels lowerCAmelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) lowerCAmelCase = FlaxTransformeraDModel( in_channels=self.out_channels , 
n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase ) lowerCAmelCase = resnets lowerCAmelCase = attentions if self.add_upsample: lowerCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states lowerCAmelCase = res_hidden_states_tuple[-1] lowerCAmelCase = res_hidden_states_tuple[:-1] lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowerCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) lowerCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) if self.add_upsample: lowerCAmelCase = self.upsamplers_a(lowerCAmelCase ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): _a = 42 _a = 42 _a = 42 _a = 0.0 _a = 1 _a = True _a = jnp.floataa def __lowercase ( self : Union[str, Any] ): lowerCAmelCase = [] for i in range(self.num_layers ): lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels lowerCAmelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) lowerCAmelCase = resnets if self.add_upsample: lowerCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any=True ): for resnet in self.resnets: # pop res hidden states lowerCAmelCase = res_hidden_states_tuple[-1] lowerCAmelCase = res_hidden_states_tuple[:-1] lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) lowerCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) if self.add_upsample: lowerCAmelCase = self.upsamplers_a(lowerCAmelCase ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): _a = 42 _a = 0.0 _a = 1 _a = 1 _a = False _a = False _a = jnp.floataa def __lowercase ( self : List[Any] ): lowerCAmelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] lowerCAmelCase = [] for _ in range(self.num_layers ): lowerCAmelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase ) lowerCAmelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase ) lowerCAmelCase = resnets lowerCAmelCase = attentions def __call__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple=True ): lowerCAmelCase = self.resnets[0](lowerCAmelCase , 
lowerCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): lowerCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) lowerCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase ) return hidden_states
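All of the up blocks above rely on the same skip-connection step: the activation saved by the matching down block is concatenated with the current activation along the channel axis before each ResNet, which is why each ResNet is constructed with in_channels=resnet_in_channels + res_skip_channels. A standalone sketch of that one step, with made-up channel counts:

import jax.numpy as jnp

# NHWC layout as used by the Flax blocks above; shapes are illustrative.
hidden_states = jnp.ones((1, 8, 8, 320))
res_hidden_states = jnp.ones((1, 8, 8, 640))  # saved by the matching down block

# Concatenate along the last (channel) axis, exactly as in the up blocks.
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(merged.shape)  # (1, 8, 8, 960)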
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' _UpperCAmelCase = multiprocessing.Manager() _UpperCAmelCase = manager.list() _UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _UpperCAmelCase = shutil.rmtree _UpperCAmelCase = os.rmdir _UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _UpperCAmelCase = {} with swallow_io(): with time_limit(_UpperCAmelCase ): exec(_UpperCAmelCase , _UpperCAmelCase ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. _UpperCAmelCase = rmtree _UpperCAmelCase = rmdir _UpperCAmelCase = chdir @contextlib.contextmanager def A ( _UpperCAmelCase : Union[str, Any] ) -> Any: '''simple docstring''' def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase ) signal.signal(signal.SIGALRM , _UpperCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCAmelCase ): with contextlib.redirect_stderr(_UpperCAmelCase ): with redirect_stdin(_UpperCAmelCase ): yield @contextlib.contextmanager def A ( ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCAmelCase ): yield dirname class __lowerCAmelCase ( A ): pass class __lowerCAmelCase ( io.StringIO ): def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any: """simple docstring""" raise OSError def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]: """simple docstring""" raise OSError def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" raise OSError def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]: """simple docstring""" return False class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore UpperCamelCase = '''stdin''' @contextlib.contextmanager def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' if root == ".": yield return _UpperCAmelCase = os.getcwd() os.chdir(_UpperCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str]=None ) -> Any: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _UpperCAmelCase = None _UpperCAmelCase = None import os _UpperCAmelCase = '1' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import shutil _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import subprocess _UpperCAmelCase = None # type: ignore _UpperCAmelCase = None import sys _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None
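The timeout machinery in this file reduces to a SIGALRM-based context manager. Since the original names are mangled, the sketch below uses names of my own choosing; the mechanism (an interval timer plus a signal handler that raises) is taken directly from the code above. Note that signal.setitimer is Unix-only.

import contextlib
import signal


class TimeoutException(Exception):
    pass


@contextlib.contextmanager
def time_limit(seconds: float):
    """Raise TimeoutException if the body runs longer than `seconds` (Unix only)."""

    def handler(signum, frame):
        raise TimeoutException("Timed out!")

    # Register the handler before arming the timer.
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        # Always disarm the timer, even if the body raised.
        signal.setitimer(signal.ITIMER_REAL, 0)


if __name__ == "__main__":
    try:
        with time_limit(0.1):
            while True:  # deliberately never finishes
                pass
    except TimeoutException:
        print("caught the timeout")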
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch a JSON record from Open Library for the given olid (e.g. ``isbn/...``)."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
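The RUN_SLOW flag at the top of this file uses a small env-parsing helper that is handy on its own. A minimal sketch mirroring the file's parse_flag_from_env (note that distutils is deprecated in recent Pythons and is kept here only to match the file above):

import os
from distutils.util import strtobool


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    """Read a yes/no environment variable, falling back to `default` if unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, fall back to `default`.
        return default
    try:
        return bool(strtobool(value))
    except ValueError:
        # More values are supported, but keep the message simple.
        raise ValueError(f"If set, {key} must be yes or no.")


# e.g. `RUN_SLOW=yes python -m pytest` enables the slow tests
RUN_SLOW = parse_flag_from_env("RUN_SLOW", default=False)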
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _lowerCAmelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _lowerCAmelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : int = numpy.dtype(numpy.uintaa ).newbyteorder(">" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0] @deprecated(_UpperCAmelCase , "Please use tf.data to implement this functionality." ) def __lowerCAmelCase ( snake_case__ ): print("Extracting" , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __UpperCamelCase : Union[str, Any] = _readaa(_UpperCAmelCase ) if magic != 2_051: raise ValueError( "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) ) __UpperCamelCase : Union[str, Any] = _readaa(_UpperCAmelCase ) __UpperCamelCase : List[Any] = _readaa(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = _readaa(_UpperCAmelCase ) __UpperCamelCase : List[Any] = bytestream.read(rows * cols * num_images ) __UpperCamelCase : int = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) __UpperCamelCase : List[Any] = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 ) return data @deprecated(_UpperCAmelCase , "Please use tf.one_hot on tensors." ) def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : List[Any] = labels_dense.shape[0] __UpperCamelCase : Optional[int] = numpy.arange(_UpperCAmelCase ) * num_classes __UpperCamelCase : Union[str, Any] = numpy.zeros((num_labels, num_classes) ) __UpperCamelCase : Dict = 1 return labels_one_hot @deprecated(_UpperCAmelCase , "Please use tf.data to implement this functionality." ) def __lowerCAmelCase ( snake_case__ , snake_case__=False , snake_case__=10 ): print("Extracting" , f.name ) with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream: __UpperCamelCase : Any = _readaa(_UpperCAmelCase ) if magic != 2_049: raise ValueError( "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) ) __UpperCamelCase : Union[str, Any] = _readaa(_UpperCAmelCase ) __UpperCamelCase : List[Any] = bytestream.read(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase ) return labels class A : '''simple docstring''' @deprecated( _UpperCAmelCase , "Please use alternatives such as official/mnist/_DataSet.py" " from tensorflow/models." 
, ) def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=None , ) -> List[str]: __UpperCamelCase , __UpperCamelCase : str = random_seed.get_seed(_UpperCAmelCase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __UpperCamelCase : Union[str, Any] = dtypes.as_dtype(_UpperCAmelCase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype ) if fake_data: __UpperCamelCase : Any = 1_0_0_0_0 __UpperCamelCase : List[Any] = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f"images.shape: {images.shape} labels.shape: {labels.shape}" __UpperCamelCase : List[str] = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __UpperCamelCase : Tuple = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __UpperCamelCase : str = images.astype(numpy.floataa ) __UpperCamelCase : Any = numpy.multiply(_UpperCAmelCase , 1.0 / 2_5_5.0 ) __UpperCamelCase : str = images __UpperCamelCase : Tuple = labels __UpperCamelCase : int = 0 __UpperCamelCase : Union[str, Any] = 0 @property def a_ (self ) -> Optional[Any]: return self._images @property def a_ (self ) -> List[str]: return self._labels @property def a_ (self ) -> List[Any]: return self._num_examples @property def a_ (self ) -> Tuple: return self._epochs_completed def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=True ) -> Optional[int]: if fake_data: __UpperCamelCase : List[Any] = [1] * 7_8_4 __UpperCamelCase : Any = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(_UpperCAmelCase )], [fake_label for _ in range(_UpperCAmelCase )], ) __UpperCamelCase : Dict = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __UpperCamelCase : Tuple = numpy.arange(self._num_examples ) numpy.random.shuffle(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = self.images[perma] __UpperCamelCase : Optional[int] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __UpperCamelCase : List[Any] = self._num_examples - start __UpperCamelCase : List[Any] = self._images[start : self._num_examples] __UpperCamelCase : List[Any] = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __UpperCamelCase : List[str] = numpy.arange(self._num_examples ) numpy.random.shuffle(_UpperCAmelCase ) __UpperCamelCase : List[str] = self.images[perm] __UpperCamelCase : int = self.labels[perm] # Start next epoch __UpperCamelCase : Any = 0 __UpperCamelCase : str = batch_size - rest_num_examples __UpperCamelCase : List[str] = self._index_in_epoch __UpperCamelCase : List[Any] = self._images[start:end] __UpperCamelCase : Optional[Any] = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __UpperCamelCase : Any = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCAmelCase , "Please write your own 
downloading logic." ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): if not gfile.Exists(_UpperCAmelCase ): gfile.MakeDirs(_UpperCAmelCase ) __UpperCamelCase : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not gfile.Exists(_UpperCAmelCase ): urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310 with gfile.GFile(_UpperCAmelCase ) as f: __UpperCamelCase : str = f.size() print("Successfully downloaded" , _UpperCAmelCase , _UpperCAmelCase , "bytes." ) return filepath @deprecated( _UpperCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load(\'mnist\')" ) def __lowerCAmelCase ( snake_case__ , snake_case__=False , snake_case__=False , snake_case__=dtypes.floataa , snake_case__=True , snake_case__=5_000 , snake_case__=None , snake_case__=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase ) __UpperCamelCase : str = fake() __UpperCamelCase : Any = fake() __UpperCamelCase : Optional[int] = fake() return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase ) if not source_url: # empty string check __UpperCamelCase : Tuple = DEFAULT_SOURCE_URL __UpperCamelCase : Any = "train-images-idx3-ubyte.gz" __UpperCamelCase : List[Any] = "train-labels-idx1-ubyte.gz" __UpperCamelCase : Dict = "t10k-images-idx3-ubyte.gz" __UpperCamelCase : Dict = "t10k-labels-idx1-ubyte.gz" __UpperCamelCase : Union[str, Any] = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file ) with gfile.Open(_UpperCAmelCase , "rb" ) as f: __UpperCamelCase : str = _extract_images(_UpperCAmelCase ) __UpperCamelCase : List[Any] = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(_UpperCAmelCase , "rb" ) as f: __UpperCamelCase : Dict = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) __UpperCamelCase : Any = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file ) with gfile.Open(_UpperCAmelCase , "rb" ) as f: __UpperCamelCase : Any = _extract_images(_UpperCAmelCase ) __UpperCamelCase : int = _maybe_download( _UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(_UpperCAmelCase , "rb" ) as f: __UpperCamelCase : Tuple = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase ) if not 0 <= validation_size <= len(_UpperCAmelCase ): __UpperCamelCase : Optional[Any] = ( "Validation size should be between 0 and " F"{len(_UpperCAmelCase )}. Received: {validation_size}." ) raise ValueError(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = train_images[:validation_size] __UpperCamelCase : Dict = train_labels[:validation_size] __UpperCamelCase : Union[str, Any] = train_images[validation_size:] __UpperCamelCase : str = train_labels[validation_size:] __UpperCamelCase : Any = {"dtype": dtype, "reshape": reshape, "seed": seed} __UpperCamelCase : Optional[Any] = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Dict = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : List[str] = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
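The dense-to-one-hot conversion above relies on numpy flat indexing, a step the obfuscation has partly destroyed (the `labels_one_hot.flat[...] = 1` write degenerated into a bare assignment). The intended trick, reconstructed with made-up labels:

import numpy as np

labels_dense = np.array([2, 0, 1])
num_classes = 3

# Offset each row by row_index * num_classes, then set those flat positions.
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
print(labels_one_hot)
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]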
298
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without clashing
    with its row, its column, or its 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return the solved grid, or
    None if no solution exists."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
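# Illustrative check for the backtracking solver above (not part of the original
# file). ``sudoku`` mutates its argument, so copy first to keep the input intact:
#
#     import copy
#     solved = sudoku(copy.deepcopy(initial_grid))
#     assert solved is not None and all(0 not in row for row in solved)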
339
0
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes returning all primes below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: find the longest sum of consecutive primes below
    ``ceiling`` that is itself prime."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
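# Quick sanity check for the sieve above (not part of the original file):
#
#     >>> prime_sieve(30)
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#
# Note that ``sol in primes`` inside ``solution`` is an O(n) list scan; building a
# ``set(primes)`` once and testing membership against it would be an easy speed-up.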
295
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
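# Minimal usage sketch mirroring the docstring example above (exact scores depend
# on the installed nltk version):
#
#     import datasets
#
#     meteor = datasets.load_metric("meteor")
#     results = meteor.compute(
#         predictions=["the cat sat on the mat"],
#         references=["the cat is sitting on the mat"],
#     )
#     print(results["meteor"])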
339
0
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , __a : Tuple , __a : Tuple=14 , __a : Dict=7 , __a : Any=True , __a : str=True , __a : Tuple=False , __a : Optional[int]=True , __a : Tuple=99 , __a : List[str]=32 , __a : List[str]=4 , __a : Dict=4 , __a : List[Any]=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Dict=0.1 , __a : Tuple=0.1 , __a : int=5_12 , __a : Optional[Any]=0.02 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_input_mask _a = use_token_type_ids _a = use_labels _a = vocab_size _a = hidden_size _a = rotary_dim _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = initializer_range _a = None _a = vocab_size - 1 _a = vocab_size - 1 _a = vocab_size - 1 def UpperCamelCase__ ( self : Optional[Any] ): _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = None if self.use_input_mask: _a = random_attention_mask([self.batch_size, self.seq_length] ) _a = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCamelCase__ ( self : Optional[int] ): _a = self.prepare_config_and_inputs() _a , _a , _a = config_and_inputs _a = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def UpperCamelCase__ ( self : Tuple , __a : List[str] , __a : str , __a : Any , __a : Any ): _a = 20 _a = model_class_name(__a ) _a = model.init_cache(input_ids.shape[0] , __a ) _a = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" ) _a = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _a = model( input_ids[:, :-1] , attention_mask=__a , past_key_values=__a , position_ids=__a , ) _a = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model( input_ids[:, -1:] , attention_mask=__a , past_key_values=outputs_cache.past_key_values , position_ids=__a , ) _a = model(__a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def UpperCamelCase__ ( self : Dict , __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[Any] ): _a = 20 _a = model_class_name(__a ) _a = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) _a 
= model.init_cache(input_ids.shape[0] , __a ) _a = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _a = model( input_ids[:, :-1] , attention_mask=__a , past_key_values=__a , position_ids=__a , ) _a = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__a , position_ids=__a , ) _a = model(__a , attention_mask=__a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) @require_flax class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __a =(FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCamelCase__ ( self : List[Any] ): _a = FlaxGPTJModelTester(self ) def UpperCamelCase__ ( self : Optional[Any] ): for model_class_name in self.all_model_classes: _a , _a , _a = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__a , __a , __a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: _a , _a , _a = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __a , __a , __a , __a ) @tooslow def UpperCamelCase__ ( self : str ): _a = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" ) _a = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=__a , truncation=__a ) _a = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" ) _a = False _a = model.config.eos_token_id _a = jax.jit(model.generate ) _a = jit_generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences _a = tokenizer.batch_decode(__a , skip_special_tokens=__a ) _a = [ "Hello this is a long string of text.\n\nI\'m trying to get the text of the", "Hey, I\'m a little late to the party. 
I\'m going to", ] self.assertListEqual(__a , __a ) @is_pt_flax_cross_test def UpperCamelCase__ ( self : List[str] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _a = self._prepare_for_class(__a , __a ) _a = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _a = model_class.__name__[4:] # Skip the "Flax" at the beginning _a = getattr(__a , __a ) _a , _a = pt_inputs["input_ids"].shape _a = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__a ): _a = 0 _a = 1 _a = 0 _a = 1 _a = pt_model_class(__a ).eval() _a = model_class(__a , dtype=jnp.floataa ) _a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a ) _a = fx_state with torch.no_grad(): _a = pt_model(**__a ).to_tuple() _a = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(__a , __a ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__a ) _a = model_class.from_pretrained(__a , from_pt=__a ) _a = fx_model_loaded(**__a ).to_tuple() self.assertEqual( len(__a ) , len(__a ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(__a , __a ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _a = self._prepare_for_class(__a , __a ) _a = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _a = model_class.__name__[4:] # Skip the "Flax" at the beginning _a = getattr(__a , __a ) _a = pt_model_class(__a ).eval() _a = model_class(__a , dtype=jnp.floataa ) _a = load_flax_weights_in_pytorch_model(__a , fx_model.params ) _a , _a = pt_inputs["input_ids"].shape _a = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__a ): _a = 0 _a = 1 _a = 0 _a = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): _a = pt_model(**__a ).to_tuple() _a = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(__a , __a ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__a ) _a = pt_model_class.from_pretrained(__a , from_flax=__a ) with torch.no_grad(): _a = pt_model_loaded(**__a ).to_tuple() self.assertEqual( len(__a ) , len(__a ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(__a , __a ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def UpperCamelCase__ ( self : Dict ): for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" ) _a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a )
63
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
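# Hypothetical invocation of the converter above (the script filename is assumed;
# ``tiny`` must be a key of ``_MODELS`` or a path to a local .pt checkpoint):
#
#     python convert_openai_whisper_to_hf.py \
#         --checkpoint_path tiny \
#         --pytorch_dump_folder_path ./whisper-tiny-hf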
339
0
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def _lowerCAmelCase ( UpperCamelCase_ ): return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __SCREAMING_SNAKE_CASE = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) __SCREAMING_SNAKE_CASE = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) __SCREAMING_SNAKE_CASE = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) __SCREAMING_SNAKE_CASE = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) __SCREAMING_SNAKE_CASE = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) __SCREAMING_SNAKE_CASE = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) __SCREAMING_SNAKE_CASE = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) __SCREAMING_SNAKE_CASE = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) __SCREAMING_SNAKE_CASE = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) __SCREAMING_SNAKE_CASE = key.replace("""image_encoder.module""" , """flava.image_model""" ) __SCREAMING_SNAKE_CASE = key.replace("""text_encoder.module""" , """flava.text_model""" ) __SCREAMING_SNAKE_CASE = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) __SCREAMING_SNAKE_CASE = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) __SCREAMING_SNAKE_CASE = key.replace("""text_projection""" , """flava.text_projection""" ) __SCREAMING_SNAKE_CASE = key.replace("""image_projection""" , """flava.image_projection""" ) __SCREAMING_SNAKE_CASE = value.float() for key, value in codebook_state_dict.items(): __SCREAMING_SNAKE_CASE = value return upgrade @torch.no_grad() def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ): if config_path is not None: __SCREAMING_SNAKE_CASE = FlavaConfig.from_pretrained(_UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = FlavaConfig() __SCREAMING_SNAKE_CASE = FlavaForPreTraining(_UpperCAmelCase ).eval() __SCREAMING_SNAKE_CASE = convert_dalle_checkpoint(_UpperCAmelCase , _UpperCAmelCase , save_checkpoint=_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ): __SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location="""cpu""" ) else: __SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="""cpu""" ) __SCREAMING_SNAKE_CASE = upgrade_state_dict(_UpperCAmelCase , _UpperCAmelCase ) hf_model.load_state_dict(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = hf_model.state_dict() __SCREAMING_SNAKE_CASE = count_parameters(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = count_parameters(_UpperCAmelCase ) + count_parameters(_UpperCAmelCase ) assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) hf_model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, 
help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __magic_name__ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
100
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
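# This builder is normally reached through the public `datasets` API rather than
# instantiated directly, e.g.:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("audiofolder", data_dir="/path/to/folder")  # labels inferred from subfolders
#     ds = load_dataset("audiofolder", data_dir="/path/to/folder", drop_labels=True)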
339
0
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def __a ( *_lowercase : Dict , **_lowercase : Any ): """simple docstring""" pass @is_pipeline_test @require_vision @require_timm @require_torch class __snake_case ( unittest.TestCase ): lowerCAmelCase_ = MODEL_FOR_OBJECT_DETECTION_MAPPING def __a ( self : Tuple , _lowercase : Any , _lowercase : Dict , _lowercase : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ObjectDetectionPipeline(model=_lowercase , image_processor=_lowercase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __a ( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(_lowercase ) , 0 ) for detected_object in outputs: self.assertEqual( _lowercase , { """score""": ANY(_lowercase ), """label""": ANY(_lowercase ), """box""": {"""xmin""": ANY(_lowercase ), """ymin""": ANY(_lowercase ), """xmax""": ANY(_lowercase ), """ymax""": ANY(_lowercase )}, } , ) import datasets SCREAMING_SNAKE_CASE__ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) SCREAMING_SNAKE_CASE__ = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] SCREAMING_SNAKE_CASE__ = object_detector(_lowercase , threshold=0.0 ) self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for outputs in batch_outputs: self.assertGreater(len(_lowercase ) , 0 ) for detected_object in outputs: self.assertEqual( _lowercase , { """score""": ANY(_lowercase ), """label""": ANY(_lowercase ), """box""": {"""xmin""": ANY(_lowercase ), """ymin""": ANY(_lowercase ), """xmax""": ANY(_lowercase ), """ymax""": ANY(_lowercase )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def __a ( self : Dict ): """simple docstring""" pass @require_torch def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """hf-internal-testing/tiny-detr-mobilenetsv3""" SCREAMING_SNAKE_CASE__ = AutoModelForObjectDetection.from_pretrained(_lowercase ) SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase ) SCREAMING_SNAKE_CASE__ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase ) SCREAMING_SNAKE_CASE__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ {"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}}, {"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}}, ] , ) SCREAMING_SNAKE_CASE__ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", 
"""http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ [ {"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}}, {"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}}, ], [ {"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}}, {"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}}, ], ] , ) @require_torch @slow def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """facebook/detr-resnet-50""" SCREAMING_SNAKE_CASE__ = AutoModelForObjectDetection.from_pretrained(_lowercase ) SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(_lowercase ) SCREAMING_SNAKE_CASE__ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase ) SCREAMING_SNAKE_CASE__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ {"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}}, {"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}}, {"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}}, {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ] , ) SCREAMING_SNAKE_CASE__ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ [ {"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}}, {"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}}, {"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}}, {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ], [ {"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}}, {"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}}, {"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}}, {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ], ] , ) @require_torch @slow def __a ( self : Any ): """simple docstring""" 
SCREAMING_SNAKE_CASE__ = """facebook/detr-resnet-50""" SCREAMING_SNAKE_CASE__ = pipeline("""object-detection""" , model=_lowercase ) SCREAMING_SNAKE_CASE__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ {"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}}, {"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}}, {"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}}, {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ] , ) SCREAMING_SNAKE_CASE__ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ [ {"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}}, {"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}}, {"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}}, {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ], [ {"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}}, {"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}}, {"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}}, {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ], ] , ) @require_torch @slow def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 0.99_85 SCREAMING_SNAKE_CASE__ = """facebook/detr-resnet-50""" SCREAMING_SNAKE_CASE__ = pipeline("""object-detection""" , model=_lowercase ) SCREAMING_SNAKE_CASE__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=_lowercase ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ {"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}}, {"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}}, ] , ) @require_torch @require_pytesseract @slow def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """Narsil/layoutlmv3-finetuned-funsd""" SCREAMING_SNAKE_CASE__ = 0.99_93 SCREAMING_SNAKE_CASE__ = pipeline("""object-detection""" , model=_lowercase , threshold=_lowercase ) SCREAMING_SNAKE_CASE__ = object_detector( 
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(_lowercase , decimals=4 ) , [ {"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_94, """ymin""": 2_54, """xmax""": 3_43, """ymax""": 2_64}}, {"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_94, """ymin""": 2_54, """xmax""": 3_43, """ymax""": 2_64}}, ] , )
219
import sys
from collections import defaultdict


class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key at ``index`` up towards the root."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
339
0
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCAmelCase_ : Optional[int] = 'Create a default config file for Accelerate with only a few flags set.' def snake_case_ ( SCREAMING_SNAKE_CASE__="no" , SCREAMING_SNAKE_CASE__ = default_json_config_file , SCREAMING_SNAKE_CASE__ = False ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = Path(_UpperCAmelCase ) path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False _SCREAMING_SNAKE_CASE : Union[str, Any] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" ) _SCREAMING_SNAKE_CASE : List[str] = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): _SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.device_count() _SCREAMING_SNAKE_CASE : Dict = num_gpus _SCREAMING_SNAKE_CASE : Optional[int] = False if num_gpus > 1: _SCREAMING_SNAKE_CASE : Any = """MULTI_GPU""" else: _SCREAMING_SNAKE_CASE : int = """NO""" elif is_xpu_available() and use_xpu: _SCREAMING_SNAKE_CASE : Optional[int] = torch.xpu.device_count() _SCREAMING_SNAKE_CASE : List[Any] = num_xpus _SCREAMING_SNAKE_CASE : Optional[Any] = False if num_xpus > 1: _SCREAMING_SNAKE_CASE : int = """MULTI_XPU""" else: _SCREAMING_SNAKE_CASE : List[str] = """NO""" elif is_npu_available(): _SCREAMING_SNAKE_CASE : Optional[int] = torch.npu.device_count() _SCREAMING_SNAKE_CASE : List[str] = num_npus _SCREAMING_SNAKE_CASE : Dict = False if num_npus > 1: _SCREAMING_SNAKE_CASE : List[Any] = """MULTI_NPU""" else: _SCREAMING_SNAKE_CASE : int = """NO""" else: _SCREAMING_SNAKE_CASE : List[str] = 0 _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : Tuple = 1 _SCREAMING_SNAKE_CASE : Optional[Any] = """NO""" _SCREAMING_SNAKE_CASE : Dict = ClusterConfig(**_UpperCAmelCase ) config.to_json_file(_UpperCAmelCase ) return path def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser("""default""" , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase ) parser.add_argument( """--config_file""" , default=_UpperCAmelCase , help=( """The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """ """such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """ """with \'huggingface\'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=_UpperCAmelCase , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. """ """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=_UpperCAmelCase ) return parser def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
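# The subcommand above is wired into the accelerate CLI; a typical call using the
# flags defined by the parser is:
#
#     accelerate config default --mixed_precision fp16
#
# which writes a minimal, machine-detected config to the default save location.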
200
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
339
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() A_ :List[str] = logging.get_logger(__name__) def A ( a_ ) -> List[str]: __UpperCamelCase : str ='huggingface/label-files' __UpperCamelCase : List[str] ='imagenet-1k-id2label.json' __UpperCamelCase : Tuple =json.load(open(hf_hub_download(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ,'r' ) ) __UpperCamelCase : Union[str, Any] ={int(_UpperCAmelCase ): v for k, v in idalabel.items()} __UpperCamelCase : Dict ={v: k for k, v in idalabel.items()} __UpperCamelCase : Optional[int] ='std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" __UpperCamelCase : Dict =BitConfig( conv_layer=_UpperCAmelCase ,num_labels=1_000 ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ,) return config def A ( a_ ) -> str: if "stem.conv" in name: __UpperCamelCase : Optional[Any] =name.replace('stem.conv' ,'bit.embedder.convolution' ) if "blocks" in name: __UpperCamelCase : Tuple =name.replace('blocks' ,'layers' ) if "head.fc" in name: __UpperCamelCase : Optional[int] =name.replace('head.fc' ,'classifier.1' ) if name.startswith('norm' ): __UpperCamelCase : int ='bit.' + name if "bit" not in name and "classifier" not in name: __UpperCamelCase : str ='bit.encoder.' 
+ name return name def A ( ) -> int: __UpperCamelCase : Any ='http://images.cocodataset.org/val2017/000000039769.jpg' __UpperCamelCase : int =Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def A ( a_ ,a_ ,a_=False ) -> Tuple: __UpperCamelCase : Any =get_config(_UpperCAmelCase ) # load original model from timm __UpperCamelCase : Tuple =create_model(_UpperCAmelCase ,pretrained=_UpperCAmelCase ) timm_model.eval() # load state_dict of original model __UpperCamelCase : Optional[Any] =timm_model.state_dict() for key in state_dict.copy().keys(): __UpperCamelCase : Optional[Any] =state_dict.pop(_UpperCAmelCase ) __UpperCamelCase : List[Any] =val.squeeze() if 'head' in key else val # load HuggingFace model __UpperCamelCase : Optional[int] =BitForImageClassification(_UpperCAmelCase ) model.eval() model.load_state_dict(_UpperCAmelCase ) # create image processor __UpperCamelCase : Any =create_transform(**resolve_data_config({} ,model=_UpperCAmelCase ) ) __UpperCamelCase : Tuple =transform.transforms __UpperCamelCase : Any ={ 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } __UpperCamelCase : int =BitImageProcessor( do_resize=_UpperCAmelCase ,size={'shortest_edge': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=_UpperCAmelCase ,crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} ,do_normalize=_UpperCAmelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) __UpperCamelCase : Tuple =prepare_img() __UpperCamelCase : Any =transform(_UpperCAmelCase ).unsqueeze(0 ) __UpperCamelCase : List[Any] =processor(_UpperCAmelCase ,return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ) # verify logits with torch.no_grad(): __UpperCamelCase : Tuple =model(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] =outputs.logits print('Logits:' ,logits[0, :3] ) print('Predicted class:' ,model.config.idalabel[logits.argmax(-1 ).item()] ) __UpperCamelCase : Tuple =timm_model(_UpperCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_UpperCAmelCase ,outputs.logits ,atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: print(F'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(F'ybelkada/{model_name}' ) processor.push_to_hub(F'ybelkada/{model_name}' ) if __name__ == "__main__": A_ :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''resnetv2_50x1_bitm''', type=str, help='''Name of the BiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub.''', ) A_ :int = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
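# Hypothetical invocation of the converter above (the script filename is assumed;
# the flags and default model name come from the argparse definition):
#
#     python convert_bit_to_pytorch.py \
#         --model_name resnetv2_50x1_bitm \
#         --pytorch_dump_folder_path ./bit-50 \
#         --push_to_hub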
71
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
339
0
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class A__ ( A__ ): def __init__( self : Dict ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =[] def A ( self : str , _a : str , _a : Optional[int] , _a : List[Any] , **_a : str ) -> Optional[Any]: '''simple docstring''' self.events.append('on_init_end' ) def A ( self : str , _a : int , _a : Tuple , _a : List[str] , **_a : Union[str, Any] ) -> int: '''simple docstring''' self.events.append('on_train_begin' ) def A ( self : Dict , _a : Any , _a : Dict , _a : Optional[int] , **_a : str ) -> List[str]: '''simple docstring''' self.events.append('on_train_end' ) def A ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Any , **_a : str ) -> Optional[int]: '''simple docstring''' self.events.append('on_epoch_begin' ) def A ( self : List[str] , _a : Optional[int] , _a : Optional[int] , _a : int , **_a : Optional[int] ) -> str: '''simple docstring''' self.events.append('on_epoch_end' ) def A ( self : Union[str, Any] , _a : Tuple , _a : Optional[int] , _a : Union[str, Any] , **_a : List[Any] ) -> Union[str, Any]: '''simple docstring''' self.events.append('on_step_begin' ) def A ( self : Optional[int] , _a : Tuple , _a : List[str] , _a : int , **_a : str ) -> Dict: '''simple docstring''' self.events.append('on_step_end' ) def A ( self : Union[str, Any] , _a : Optional[Any] , _a : Tuple , _a : Optional[int] , **_a : Dict ) -> Tuple: '''simple docstring''' self.events.append('on_evaluate' ) def A ( self : Optional[int] , _a : List[str] , _a : str , _a : str , **_a : Optional[int] ) -> int: '''simple docstring''' self.events.append('on_predict' ) def A ( self : Optional[Any] , _a : List[str] , _a : List[str] , _a : Tuple , **_a : Any ) -> Union[str, Any]: '''simple docstring''' self.events.append('on_save' ) def A ( self : Any , _a : Union[str, Any] , _a : Any , _a : Dict , **_a : int ) -> Optional[int]: '''simple docstring''' self.events.append('on_log' ) def A ( self : Union[str, Any] , _a : Union[str, Any] , _a : List[Any] , _a : Dict , **_a : Optional[Any] ) -> List[Any]: '''simple docstring''' self.events.append('on_prediction_step' ) @require_torch class A__ ( unittest.TestCase ): def A ( self : Dict ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =tempfile.mkdtemp() def A ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' shutil.rmtree(self.output_dir ) def A ( self : Any , _a : int=0 , _a : Tuple=0 , _a : str=64 , _a : str=64 , _a : Optional[Any]=None , _a : Tuple=False , **_a : str ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =RegressionDataset(length=_a ) _SCREAMING_SNAKE_CASE =RegressionDataset(length=_a ) _SCREAMING_SNAKE_CASE =RegressionModelConfig(a=_a , b=_a ) _SCREAMING_SNAKE_CASE =RegressionPreTrainedModel(_a ) _SCREAMING_SNAKE_CASE =TrainingArguments(self.output_dir , disable_tqdm=_a , report_to=[] , **_a ) return Trainer( _a , _a , train_dataset=_a , eval_dataset=_a , callbacks=_a , ) def A ( self : List[Any] , _a : Union[str, Any] , _a : Dict ) -> Optional[int]: '''simple docstring''' self.assertEqual(len(_a ) , 
len(_a ) ) # Order doesn't matter _SCREAMING_SNAKE_CASE =sorted(_a , key=lambda _a : cb.__name__ if isinstance(_a , _a ) else cb.__class__.__name__ ) _SCREAMING_SNAKE_CASE =sorted(_a , key=lambda _a : cb.__name__ if isinstance(_a , _a ) else cb.__class__.__name__ ) for cba, cba in zip(_a , _a ): if isinstance(_a , _a ) and isinstance(_a , _a ): self.assertEqual(_a , _a ) elif isinstance(_a , _a ) and not isinstance(_a , _a ): self.assertEqual(_a , cba.__class__ ) elif not isinstance(_a , _a ) and isinstance(_a , _a ): self.assertEqual(cba.__class__ , _a ) else: self.assertEqual(_a , _a ) def A ( self : int , _a : Optional[int] ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =['on_init_end', 'on_train_begin'] _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =len(trainer.get_eval_dataloader() ) _SCREAMING_SNAKE_CASE =['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(_a ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def A ( self : Dict ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.get_trainer() _SCREAMING_SNAKE_CASE =DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) # Callbacks passed at init are added to the default callbacks _SCREAMING_SNAKE_CASE =self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(_a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _SCREAMING_SNAKE_CASE =self.get_trainer(disable_tqdm=_a ) _SCREAMING_SNAKE_CASE =DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) def A ( self : List[str] ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =DEFAULT_CALLBACKS.copy() + [ProgressCallback] _SCREAMING_SNAKE_CASE =self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(_a ) expected_callbacks.remove(_a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) _SCREAMING_SNAKE_CASE =self.get_trainer() _SCREAMING_SNAKE_CASE =trainer.pop_callback(_a ) self.assertEqual(cb.__class__ , _a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) trainer.add_callback(_a ) expected_callbacks.insert(0 , _a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) # We can also add, pop, or remove by instance _SCREAMING_SNAKE_CASE =self.get_trainer() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[0] trainer.remove_callback(_a ) expected_callbacks.remove(_a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) _SCREAMING_SNAKE_CASE =self.get_trainer() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[0] _SCREAMING_SNAKE_CASE =trainer.pop_callback(_a ) self.assertEqual(_a , _a ) 
self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) trainer.add_callback(_a ) expected_callbacks.insert(0 , _a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , _a ) def A ( self : List[Any] ) -> Any: '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=_a ) _SCREAMING_SNAKE_CASE =self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[-2].events self.assertEqual(_a , self.get_expected_events(_a ) ) # Independent log/save/eval _SCREAMING_SNAKE_CASE =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[-2].events self.assertEqual(_a , self.get_expected_events(_a ) ) _SCREAMING_SNAKE_CASE =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[-2].events self.assertEqual(_a , self.get_expected_events(_a ) ) _SCREAMING_SNAKE_CASE =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[-2].events self.assertEqual(_a , self.get_expected_events(_a ) ) _SCREAMING_SNAKE_CASE =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[-2].events self.assertEqual(_a , self.get_expected_events(_a ) ) # A bit of everything _SCREAMING_SNAKE_CASE =self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() _SCREAMING_SNAKE_CASE =trainer.callback_handler.callbacks[-2].events self.assertEqual(_a , self.get_expected_events(_a ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: _SCREAMING_SNAKE_CASE =self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(_a ) in warn_mock.call_args[0][0]
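# A minimal stand-alone sketch of the callback pattern the tests above exercise.
# Class and variable names here are illustrative; only the TrainerCallback hook
# signatures (args, state, control, **kwargs) come from the transformers API.
from transformers import TrainerCallback


class EventRecorderCallback(TrainerCallback):
    """Appends the name of every hook it receives, in call order."""

    def __init__(self):
        self.events = []

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")


# Usage sketch: pass the callback at init, then inspect it after training, e.g.
#   trainer = Trainer(model, training_args, callbacks=[EventRecorderCallback()])
#   trainer.train()
#   print(trainer.callback_handler.callbacks[-1].events)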
47
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
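# The metric above delegates to NLTK's GLEU implementation, so it can be
# sanity-checked against nltk directly. A minimal sketch; the token lists
# below are made-up examples, not taken from the docstring.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]

# corpus_gleu aggregates n-gram matches over the whole corpus before scoring.
score = gleu_score.corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)
print(round(score, 2))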
339
0
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return "\n".join( f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=1_0))
249
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
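# The _LazyModule machinery above defers heavy imports until an attribute is
# first accessed. A minimal stand-alone sketch of the same idea using PEP 562's
# module-level __getattr__; the package and class names are hypothetical.
# (Contents of a hypothetical lazy_pkg/__init__.py)
import importlib
from typing import TYPE_CHECKING

_import_structure = {"heavy_module": ["HeavyClass"]}

if TYPE_CHECKING:
    # Static type checkers see the real names eagerly.
    from .heavy_module import HeavyClass  # noqa: F401
else:
    def __getattr__(name):
        # Import the submodule only when one of its exports is requested.
        for module_name, exported in _import_structure.items():
            if name in exported:
                module = importlib.import_module(f".{module_name}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")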
339
0
"""simple docstring""" def lowercase (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : int ) -> str: '''simple docstring''' if index == r: for j in range(_UpperCAmelCase ): print(data[j] , end=""" """ ) print(""" """ ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location lowerCAmelCase = arr[i] combination_util(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , index + 1 , _UpperCAmelCase , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def lowercase (snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> List[Any]: '''simple docstring''' lowerCAmelCase = [0] * r # Print all combination using temporary array 'data[]' combination_util(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 0 , _UpperCAmelCase , 0 ) if __name__ == "__main__": # Driver code to check the function above a = [1_0, 2_0, 3_0, 4_0, 5_0] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
155
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class __lowerCAmelCase ( A ): UpperCamelCase = '''open-llama''' def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = rms_norm_eps _UpperCAmelCase = use_cache _UpperCAmelCase = kwargs.pop( 'use_memorry_efficient_attention' , A) _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_dropout_prob _UpperCAmelCase = use_stable_embedding _UpperCAmelCase = shared_input_output_embedding _UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , ) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F"got {self.rope_scaling}") _UpperCAmelCase = self.rope_scaling.get('type' , A) _UpperCAmelCase = self.rope_scaling.get('factor' , A) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
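# Assuming the class above corresponds to transformers' OpenLlamaConfig (the
# identifiers are mangled), the rope_scaling validation can be exercised as
# below. The import path is an assumption and depends on the installed version.
from transformers import OpenLlamaConfig  # assumed import

config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes

# Anything other than type in {"linear", "dynamic"} with factor > 1.0 raises:
# OpenLlamaConfig(rope_scaling={"type": "exotic", "factor": 2.0})  # ValueError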
339
0
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) # TODO Update this UpperCamelCase = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class snake_case_ ( __A ): __A : Optional[Any] = "esm" def __init__( self : Dict , lowercase_ : int=None , lowercase_ : Tuple=None , lowercase_ : str=None , lowercase_ : Dict=7_68 , lowercase_ : Optional[int]=12 , lowercase_ : Tuple=12 , lowercase_ : List[str]=30_72 , lowercase_ : int=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Dict=10_26 , lowercase_ : Any=0.02 , lowercase_ : int=1E-12 , lowercase_ : Any="absolute" , lowercase_ : Any=True , lowercase_ : Any=None , lowercase_ : Optional[int]=False , lowercase_ : Optional[int]=False , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Any , ) -> Dict: super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_ ) lowercase__ : Any = vocab_size lowercase__ : str = hidden_size lowercase__ : Tuple = num_hidden_layers lowercase__ : str = num_attention_heads lowercase__ : Optional[int] = intermediate_size lowercase__ : Dict = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : Optional[Any] = initializer_range lowercase__ : List[str] = layer_norm_eps lowercase__ : Dict = position_embedding_type lowercase__ : Any = use_cache lowercase__ : Optional[Any] = emb_layer_norm_before lowercase__ : Dict = token_dropout lowercase__ : int = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("No esmfold_config supplied for folding model, using default values." ) lowercase__ : List[Any] = EsmFoldConfig() elif isinstance(lowercase_ , lowercase_ ): lowercase__ : str = EsmFoldConfig(**lowercase_ ) lowercase__ : str = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) lowercase__ : Any = get_default_vocab_list() else: lowercase__ : List[str] = vocab_list else: lowercase__ : List[str] = None lowercase__ : Union[str, Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowercase_ ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" 
) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: lowercase__ : int = super().to_dict() if isinstance(self.esmfold_config , lowercase_ ): lowercase__ : List[Any] = self.esmfold_config.to_dict() return output @dataclass class snake_case_ : __A : Tuple = None __A : str = True __A : Optional[int] = False __A : str = False __A : Dict = False __A : Union[str, Any] = 0 __A : List[Any] = True __A : List[Any] = False __A : str = 128 __A : int = None def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: if self.trunk is None: lowercase__ : str = TrunkConfig() elif isinstance(self.trunk , lowercase_ ): lowercase__ : Optional[Any] = TrunkConfig(**self.trunk ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : int = asdict(self ) lowercase__ : Any = self.trunk.to_dict() return output @dataclass class snake_case_ : __A : Union[str, Any] = 48 __A : Dict = 1024 __A : Dict = 128 __A : Dict = 32 __A : List[str] = 32 __A : List[Any] = 32 __A : Optional[int] = 0 __A : Optional[Any] = 0 __A : int = False __A : str = 4 __A : Tuple = 128 __A : List[Any] = None def __UpperCamelCase ( self : str ) -> List[str]: if self.structure_module is None: lowercase__ : Tuple = StructureModuleConfig() elif isinstance(self.structure_module , lowercase_ ): lowercase__ : List[str] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase__ : int = self.sequence_state_dim // self.sequence_head_width lowercase__ : str = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def __UpperCamelCase ( self : int ) -> int: lowercase__ : Dict = asdict(self ) lowercase__ : Union[str, Any] = self.structure_module.to_dict() return output @dataclass class snake_case_ : __A : List[str] = 384 __A : str = 128 __A : Dict = 16 __A : Optional[Any] = 128 __A : Optional[Any] = 12 __A : int = 4 __A : Union[str, Any] = 8 __A : str = 0.1 __A : Dict = 8 __A : List[str] = 1 __A : Dict = 2 __A : Dict = 7 __A : Tuple = 10 __A : Optional[Any] = 1e-8 __A : List[str] = 1e5 def __UpperCamelCase ( self : Dict ) -> List[str]: return asdict(self ) def lowercase_ ( ): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", 
"M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
87
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the issuer prefix of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over a given credit card number."""
    cc_number = credit_card_number
    total = 0
    start = len(cc_number) - 2
    for i in range(start, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate a credit card number and print the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
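# The doubling-and-wrap step above can be cross-checked with a compact slice
# based version: d * 2 - 9 equals the digit sum of d * 2 whenever it exceeds 9.
def luhn_ok(number: str) -> bool:
    digits = [int(d) for d in number]
    total = sum(digits[-1::-2]) + sum(
        d * 2 - 9 if d * 2 > 9 else d * 2 for d in digits[-2::-2]
    )
    return total % 10 == 0


assert luhn_ok("4111111111111111")
assert not luhn_ok("4111111111111112")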
339
0
'''simple docstring''' import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1_2_8 , _UpperCAmelCase=3_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> str: __UpperCamelCase : Union[str, Any] = parent __UpperCamelCase : Optional[Any] = batch_size __UpperCamelCase : List[str] = seq_length __UpperCamelCase : int = is_training __UpperCamelCase : Union[str, Any] = use_input_mask __UpperCamelCase : Optional[int] = use_token_type_ids __UpperCamelCase : Dict = use_labels __UpperCamelCase : Tuple = vocab_size __UpperCamelCase : Any = hidden_size __UpperCamelCase : str = num_hidden_layers __UpperCamelCase : List[str] = num_attention_heads __UpperCamelCase : Any = intermediate_size __UpperCamelCase : Any = hidden_act __UpperCamelCase : Union[str, Any] = hidden_dropout_prob __UpperCamelCase : Dict = attention_probs_dropout_prob __UpperCamelCase : Optional[int] = max_position_embeddings __UpperCamelCase : Union[str, Any] = type_vocab_size __UpperCamelCase : Optional[Any] = type_sequence_label_size __UpperCamelCase : str = initializer_range __UpperCamelCase : Tuple = num_labels __UpperCamelCase : Dict = num_choices __UpperCamelCase : Any = scope def a_ (self ) -> List[Any]: __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : Dict = None if self.use_input_mask: __UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : List[Any] = None if self.use_token_type_ids: __UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Tuple = None __UpperCamelCase : str = None if self.use_labels: __UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ (self ) -> Tuple: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def a_ (self ) -> List[Any]: ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : Union[str, Any] = self.prepare_config_and_inputs() __UpperCamelCase : List[Any] = True __UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : Union[str, Any] = NezhaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Dict: __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Dict = NezhaModel(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Tuple = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) __UpperCamelCase : Optional[int] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , ) __UpperCamelCase : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Optional[Any] = NezhaForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: __UpperCamelCase : Union[str, Any] = 
NezhaForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : List[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : str = NezhaForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : int = NezhaForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Dict = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: __UpperCamelCase : int = self.num_labels __UpperCamelCase : Union[str, Any] = NezhaForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : int = self.num_labels __UpperCamelCase : str = NezhaForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: __UpperCamelCase : Tuple = self.num_choices __UpperCamelCase : Tuple = NezhaForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase : Any = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a_ 
(self ) -> Dict: __UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : Optional[Any] = config_and_inputs __UpperCamelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) A = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) A = True def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> List[str]: __UpperCamelCase : List[str] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): __UpperCamelCase : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) __UpperCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def a_ (self ) -> Any: __UpperCamelCase : Dict = NezhaModelTester(self ) __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 ) def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> List[str]: __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a_ (self ) -> List[str]: __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase ) def a_ (self ) -> List[str]: ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() __UpperCamelCase : Tuple = None self.model_tester.create_and_check_model_as_decoder( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) def a_ (self ) -> Dict: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a_ (self ) -> int: __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*_UpperCAmelCase ) def a_ (self ) -> List[str]: __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase ) 
def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a_ (self ) -> str: __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a_ (self ) -> Any: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase : str = NezhaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @slow @require_torch_gpu def a_ (self ) -> str: __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return __UpperCamelCase : Any = True __UpperCamelCase : List[Any] = model_class(config=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Optional[int] = torch.jit.trace( _UpperCAmelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , "bert.pt" ) ) __UpperCamelCase : Union[str, Any] = torch.jit.load(os.path.join(_UpperCAmelCase , "bert.pt" ) , map_location=_UpperCAmelCase ) loaded(inputs_dict["input_ids"].to(_UpperCAmelCase ) , inputs_dict["attention_mask"].to(_UpperCAmelCase ) ) @require_torch class A ( unittest.TestCase ): '''simple docstring''' @slow def a_ (self ) -> Tuple: __UpperCamelCase : Any = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __UpperCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __UpperCamelCase : Optional[int] = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , _UpperCAmelCase ) __UpperCamelCase : Dict = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) ) @slow def a_ (self ) -> Tuple: __UpperCamelCase : int = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __UpperCamelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __UpperCamelCase : Optional[int] = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , _UpperCAmelCase ) __UpperCamelCase : Optional[Any] = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
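# The trace/save/load round trip at the end of the suite follows a standard
# torch.jit pattern; here is a self-contained sketch with a toy module.
import os
import tempfile

import torch
from torch import nn


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


model = TinyModel().eval()
example = torch.randn(1, 4)

traced = torch.jit.trace(model, (example,))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
    torch.testing.assert_close(loaded(example), model(example))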
298
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the
    1000-digit number ``n`` (Project Euler problem 8)."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
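# The same sliding-window product is easier to see on a short string, and
# math.prod avoids the reduce-over-strings trick used above (which multiplies
# via repeated int/str conversion).
from math import prod

digits = "3675356291"
window = 4
best = max(
    prod(int(d) for d in digits[i : i + window])
    for i in range(len(digits) - window + 1)
)
print(best)  # 630, from the window "3675" (3 * 6 * 7 * 5)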
339
0
def catalan_number(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed), computed
    iteratively from the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
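# The loop implements the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1). With
# this file's 1-indexed convention, the closed form is binom(2n-2, n-1) / n,
# which gives a handy cross-check.
from math import comb


def catalan_closed_form(number: int) -> int:
    return comb(2 * number - 2, number - 1) // number


assert [catalan_closed_form(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]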
295
from __future__ import annotations from collections.abc import Callable UpperCAmelCase__ = list[list[float | int]] def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = matrix[row][col] _UpperCAmelCase = vector[row][0] _UpperCAmelCase = 0 _UpperCAmelCase = 0 while row < size and col < size: # pivoting _UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): _UpperCAmelCase = augmented[rowa][col] / augmented[row][col] _UpperCAmelCase = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): _UpperCAmelCase = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = (x_val + 1) ** (size - col - 1) _UpperCAmelCase = y_val _UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(_UpperCAmelCase : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def A ( _UpperCAmelCase : int ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int: '''simple docstring''' _UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] _UpperCAmelCase = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _UpperCAmelCase = 0 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for poly in polynomials: _UpperCAmelCase = 1 while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(f"""{solution() = }""")
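# The first helper above is a plain Gaussian-elimination solver with partial
# pivoting and back substitution, i.e. what numpy.linalg.solve computes. A
# small cross-check on the system 2x + y = 5, x - y = 1 (so x = 2, y = 1):
import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, -1.0]])
vector = np.array([5.0, 1.0])
print(np.linalg.solve(matrix, vector))  # [2. 1.]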
339
0
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build the given value from the largest denominations first."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)
    # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
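# A quick worked example of the greedy strategy with the default Indian
# denominations:
print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]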
63
from __future__ import annotations


def all_unique(values: list[int]) -> bool:
    """Return True if every element in the list appears only once."""
    return len(set(values)) == len(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
0
"""simple docstring""" import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration __magic_name__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) __magic_name__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = list(s_dict.keys() ) for key in keys: __SCREAMING_SNAKE_CASE = key for k, v in WHISPER_MAPPING.items(): if k in key: __SCREAMING_SNAKE_CASE = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(f"{key} -> {new_key}" ) __SCREAMING_SNAKE_CASE = s_dict.pop(_UpperCAmelCase ) return s_dict def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = emb.weight.shape __SCREAMING_SNAKE_CASE = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = os.path.basename(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = url.split("""/""" )[-2] __SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if 
os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): __SCREAMING_SNAKE_CASE = open(_UpperCAmelCase , """rb""" ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=_UpperCAmelCase , unit_divisor=1024 ) as loop: while True: __SCREAMING_SNAKE_CASE = source.read(8192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = open(_UpperCAmelCase , """rb""" ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" ) return model_bytes def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): if ".pt" not in checkpoint_path: __SCREAMING_SNAKE_CASE = _download(_MODELS[checkpoint_path] ) else: __SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location="""cpu""" ) __SCREAMING_SNAKE_CASE = original_checkpoint["""dims"""] __SCREAMING_SNAKE_CASE = original_checkpoint["""model_state_dict"""] __SCREAMING_SNAKE_CASE = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] __SCREAMING_SNAKE_CASE = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) __SCREAMING_SNAKE_CASE = WhisperForConditionalGeneration(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f" but all the following weights are missing {missing}" ) if tie_embeds: __SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __SCREAMING_SNAKE_CASE = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") __magic_name__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
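# The download helper verifies checkpoints against the SHA-256 digest embedded
# in each URL path; `hashlib.shaaaa` in the text above is presumably a mangling
# of hashlib.sha256. A stand-alone sketch of the same check, streamed so large
# checkpoints need not fit in memory:
import hashlib


def sha256_matches(path: str, expected_sha256: str) -> bool:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256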
100
import os UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000} def A ( _UpperCAmelCase : str ) -> int: '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(_UpperCAmelCase ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def A ( _UpperCAmelCase : int ) -> str: '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1_000 numerals += m_count * "M" num %= 1_000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int: '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase ) _UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase ) savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase ) return savings if __name__ == "__main__": print(f"""{solution() = }""")
339
0
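For reference, a minimal self-contained sketch of the checksum-verified download pattern used in the Whisper conversion sample above. The function name and paths here are placeholders, not part of the sample; the only assumption carried over is that the SHA-256 hex digest is the second-to-last segment of the download URL.

import hashlib
import os
import urllib.request

def download_with_checksum(url: str, target: str) -> bytes:
    # The OpenAI checkpoint URLs embed the SHA-256 digest as the second-to-last path segment.
    expected_sha256 = url.split("/")[-2]
    if os.path.isfile(target):
        data = open(target, "rb").read()
        if hashlib.sha256(data).hexdigest() == expected_sha256:
            return data  # cached copy is intact, skip the download
    with urllib.request.urlopen(url) as source, open(target, "wb") as output:
        while True:
            buffer = source.read(8192)
            if not buffer:
                break
            output.write(buffer)
    data = open(target, "rb").read()
    if hashlib.sha256(data).hexdigest() != expected_sha256:
        raise RuntimeError("Model downloaded but the SHA256 checksum does not match; please retry.")
    return data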
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __snake_case ( unittest.TestCase ): @slow def __a ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""google/mt5-small""" ) SCREAMING_SNAKE_CASE__ = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids SCREAMING_SNAKE_CASE__ = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids SCREAMING_SNAKE_CASE__ = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id ) SCREAMING_SNAKE_CASE__ = model(_lowercase , decoder_input_ids=_lowercase ).logits SCREAMING_SNAKE_CASE__ = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean() SCREAMING_SNAKE_CASE__ = -(labels.shape[-1] * loss.item()) SCREAMING_SNAKE_CASE__ = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
219
import requests from bsa import BeautifulSoup def A ( _UpperCAmelCase : str , _UpperCAmelCase : dict ) -> str: '''simple docstring''' _UpperCAmelCase = BeautifulSoup(requests.get(_UpperCAmelCase , params=_UpperCAmelCase ).content , 'html.parser' ) _UpperCAmelCase = soup.find('div' , attrs={'class': 'gs_ri'} ) _UpperCAmelCase = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' ) return anchors[2].get_text() if __name__ == "__main__": UpperCAmelCase__ = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
0
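A minimal sketch of the requests + BeautifulSoup pattern the citation scrapers in the surrounding rows rely on; the URL below is a placeholder and the selector is deliberately simplified compared to the Google Scholar gs_ri/gs_fl drilling done in the samples.

import requests
from bs4 import BeautifulSoup

# Placeholder URL; the scrapers above and below target Google Scholar instead.
html = requests.get("https://example.com", timeout=10).content
soup = BeautifulSoup(html, "html.parser")
first_link = soup.find("a")
print(first_link.get_text() if first_link else "no links found")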
'''simple docstring''' import requests from bsa import BeautifulSoup def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = BeautifulSoup(requests.get(_UpperCAmelCase , params=_UpperCAmelCase ).content , """html.parser""" ) _SCREAMING_SNAKE_CASE : Dict = soup.find("""div""" , attrs={"""class""": """gs_ri"""} ) _SCREAMING_SNAKE_CASE : Optional[Any] = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" ) return anchors[2].get_text() if __name__ == "__main__": UpperCAmelCase_ : int = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 2018, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
200
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __A ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , lowerCamelCase__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , lowerCamelCase__=True , ): """simple docstring""" __UpperCamelCase : int =size if size is not None else {'height': 224, 'width': 224} __UpperCamelCase : Optional[Any] =crop_size if crop_size is not None else {'height': 18, 'width': 18} __UpperCamelCase : Optional[Any] =parent __UpperCamelCase : List[str] =batch_size __UpperCamelCase : Optional[Any] =num_channels __UpperCamelCase : Any =image_size __UpperCamelCase : Optional[Any] =min_resolution __UpperCamelCase : Tuple =max_resolution __UpperCamelCase : Tuple =do_resize __UpperCamelCase : Optional[int] =size __UpperCamelCase : Optional[int] =do_center_crop __UpperCamelCase : Union[str, Any] =crop_size __UpperCamelCase : Tuple =do_normalize __UpperCamelCase : int =image_mean __UpperCamelCase : Dict =image_std __UpperCamelCase : List[str] =do_convert_rgb def __lowercase ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def __lowercase ( self , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False ): """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __UpperCamelCase : Union[str, Any] =[] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __UpperCamelCase : Tuple =[] for i in range(self.batch_size ): __UpperCamelCase , __UpperCamelCase : Union[str, Any] =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __UpperCamelCase : Optional[int] =[Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs] if torchify: __UpperCamelCase : Any =[torch.from_numpy(lowerCamelCase__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class __A ( a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Any =ChineseCLIPImageProcessor if is_vision_available() else None def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =ChineseCLIPImageProcessingTester(self , do_center_crop=lowerCamelCase__ ) @property def __lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self ): """simple docstring""" 
__UpperCamelCase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'size' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_center_crop' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'center_crop' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'image_mean' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'image_std' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_convert_rgb' ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 224, 'width': 224} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) __UpperCamelCase : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def __lowercase ( self ): """simple docstring""" pass def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : str =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input __UpperCamelCase : Dict =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __UpperCamelCase : List[str] =image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase : Optional[Any] =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input __UpperCamelCase : List[str] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __UpperCamelCase : Any =image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase : Union[str, Any] =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: 
self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input __UpperCamelCase : Any =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __UpperCamelCase : Any =image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class __A ( a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Any =ChineseCLIPImageProcessor if is_vision_available() else None def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowerCamelCase__ ) __UpperCamelCase : int =3 @property def __lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'size' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_center_crop' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'center_crop' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'image_mean' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'image_std' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'do_convert_rgb' ) ) def __lowercase ( self ): """simple docstring""" pass def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : Union[str, Any] =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input __UpperCamelCase : List[str] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __UpperCamelCase : List[Any] =image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
71
UpperCAmelCase__ = {} def A ( _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: '''simple docstring''' # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _UpperCAmelCase = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _UpperCAmelCase = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , 0 ) _UpperCAmelCase = state_late + state_absent + state_ontime _UpperCAmelCase = prizestrings return prizestrings def A ( _UpperCAmelCase : int = 30 ) -> int: '''simple docstring''' return _calculate(_UpperCAmelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
339
0
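A quick sanity check for the prize-strings recursion in the row above (Project Euler 191), rewritten with functools.lru_cache in place of the hand-rolled cache dictionary; the function name is my own. Over four days there are 43 valid strings, which the assert below confirms.

from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    # A string becomes invalid after 2 total absences or 3 consecutive late days.
    if absent == 2 or late == 3:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

assert prize_strings(4) == 43  # value stated in the Project Euler 191 problem text
print(prize_strings(30))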
'''simple docstring''' import itertools import string from collections.abc import Generator, Iterable def _lowerCAmelCase ( _UpperCamelCase : Iterable[str] , _UpperCamelCase : int ) -> Generator[tuple[str, ...], None, None]: """simple docstring""" _SCREAMING_SNAKE_CASE =iter(_UpperCAmelCase ) while True: _SCREAMING_SNAKE_CASE =tuple(itertools.islice(_UpperCAmelCase , _UpperCAmelCase ) ) if not chunk: return yield chunk def _lowerCAmelCase ( _UpperCamelCase : str ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =''.join([c.upper() for c in dirty if c in string.ascii_letters] ) _SCREAMING_SNAKE_CASE ='' if len(_UpperCAmelCase ) < 2: return dirty for i in range(len(_UpperCAmelCase ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_UpperCAmelCase ) & 1: clean += "X" return clean def _lowerCAmelCase ( _UpperCamelCase : str ) -> list[str]: """simple docstring""" _SCREAMING_SNAKE_CASE ='ABCDEFGHIKLMNOPQRSTUVWXYZ' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler _SCREAMING_SNAKE_CASE =[] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_UpperCAmelCase ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_UpperCAmelCase ) return table def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =generate_table(_UpperCAmelCase ) _SCREAMING_SNAKE_CASE =prepare_input(_UpperCAmelCase ) _SCREAMING_SNAKE_CASE ='' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_UpperCAmelCase , 2 ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =divmod(table.index(_UpperCAmelCase ) , 5 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =divmod(table.index(_UpperCAmelCase ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =generate_table(_UpperCAmelCase ) _SCREAMING_SNAKE_CASE ='' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_UpperCAmelCase , 2 ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =divmod(table.index(_UpperCAmelCase ) , 5 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =divmod(table.index(_UpperCAmelCase ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
47
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
0
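A small standalone sketch of the digraph-preparation rule used by the Playfair sample above (insert X between doubled letters, pad to even length); the names are my own, and the rule mirrors that sample rather than every Playfair variant.

import string

def prepare_digraphs(dirty: str) -> str:
    # Keep only letters and uppercase them before pairing.
    src = "".join(c.upper() for c in dirty if c in string.ascii_letters)
    if len(src) < 2:
        return src
    out = ""
    for i in range(len(src) - 1):
        out += src[i]
        if src[i] == src[i + 1]:
            out += "X"  # split doubled letters across two digraphs
    out += src[-1]
    if len(out) % 2:
        out += "X"  # pad so the text splits evenly into pairs
    return out

print(prepare_digraphs("Hello World"))  # HELXLOWORLDX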
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[Any] = credit_card_number __lowercase : List[str] = 0 __lowercase : str = len(_UpperCAmelCase ) - 2 for i in range(_UpperCAmelCase , -1 , -2 ): # double the value of every second digit __lowercase : Optional[int] = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 __lowercase : List[str] = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = f"""{credit_card_number} is an invalid credit card number because""" if not credit_card_number.isdigit(): print(f"""{error_message} it has nonnumerical characters.""" ) return False if not 13 <= len(_UpperCAmelCase ) <= 16: print(f"""{error_message} of its length.""" ) return False if not validate_initial_digits(_UpperCAmelCase ): print(f"""{error_message} of its first two digits.""" ) return False if not luhn_validation(_UpperCAmelCase ): print(f"""{error_message} it fails the Luhn check.""" ) return False print(f"""{credit_card_number} is a valid credit card number.""" ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('4111111111111111') validate_credit_card_number('32323')
249
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
0
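For reference, the Luhn checksum at the heart of the credit-card validator above, as a compact standalone sketch (names are my own):

def luhn_is_valid(cc_number: str) -> bool:
    total = 0
    # Walk the digits right to left and double every second one.
    for offset, ch in enumerate(reversed(cc_number)):
        digit = int(ch)
        if offset % 2 == 1:
            digit *= 2
            if digit > 9:
                digit -= 9  # equivalent to summing the two digits of the product
        total += digit
    return total % 10 == 0

assert luhn_is_valid("4111111111111111")
assert not luhn_is_valid("4111111111111112")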
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowercase (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=None ) -> Optional[Any]: '''simple docstring''' assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match''' lowerCAmelCase = nn.Parameter(_UpperCAmelCase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match''' lowerCAmelCase = nn.Parameter(_UpperCAmelCase ) def lowercase (snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : str ) -> List[Any]: '''simple docstring''' lowerCAmelCase = np.asarray(weights[0] ) lowerCAmelCase = np.asarray(weights[1] ) lowerCAmelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(_UpperCAmelCase ).view(-1 , _UpperCAmelCase ).contiguous().transpose(0 , 1 ) , ) def lowercase (snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = np.asarray(weights[0] ) lowerCAmelCase = np.asarray(weights[1] ) lowerCAmelCase = np.asarray(weights[2] ) lowerCAmelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(_UpperCAmelCase ).view(-1 , _UpperCAmelCase ).contiguous().transpose(0 , 1 ) , ) def lowercase (snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase = weights[0][0][0] lowerCAmelCase = np.asarray(layer_norm_a[0] ) lowerCAmelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # lsh weights + output lowerCAmelCase = weights[0][1] if len(_UpperCAmelCase ) < 4: set_layer_weights_in_torch_lsh(_UpperCAmelCase , torch_block.attention , _UpperCAmelCase ) else: set_layer_weights_in_torch_local(_UpperCAmelCase , torch_block.attention , _UpperCAmelCase ) # intermediate weighs lowerCAmelCase = weights[2][0][1][2] # Chunked Feed Forward if len(_UpperCAmelCase ) == 4: lowerCAmelCase = intermediate_weights[2] # layernorm 2 lowerCAmelCase = np.asarray(intermediate_weights[0][0] ) lowerCAmelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # intermediate dense lowerCAmelCase = np.asarray(intermediate_weights[1][0] ) lowerCAmelCase = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) # intermediate out 
lowerCAmelCase = np.asarray(intermediate_weights[4][0] ) lowerCAmelCase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) def lowercase (snake_case__ : Any , snake_case__ : Dict , snake_case__ : List[str] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = torch_model.reformer # word embeds lowerCAmelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(_UpperCAmelCase ) , ) if isinstance(weights[3] , _UpperCAmelCase ): lowerCAmelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCAmelCase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'''{position_embeddings[emb_idx]} emb does not match''' lowerCAmelCase = nn.Parameter(torch.tensor(_UpperCAmelCase ) ) lowerCAmelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( _UpperCAmelCase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # output layer norm lowerCAmelCase = np.asarray(weights[7][0] ) lowerCAmelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # output embeddings lowerCAmelCase = np.asarray(weights[9][0] ) lowerCAmelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) def lowercase (snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase = ReformerConfig.from_json_file(_UpperCAmelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCAmelCase = ReformerModelWithLMHead(_UpperCAmelCase ) with open(_UpperCAmelCase , """rb""" ) as f: lowerCAmelCase = pickle.load(_UpperCAmelCase )["""weights"""] set_model_weights_in_torch(_UpperCAmelCase , _UpperCAmelCase , config.hidden_size ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _UpperCAmelCase ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) a = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
155
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' _UpperCAmelCase = multiprocessing.Manager() _UpperCAmelCase = manager.list() _UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _UpperCAmelCase = shutil.rmtree _UpperCAmelCase = os.rmdir _UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _UpperCAmelCase = {} with swallow_io(): with time_limit(_UpperCAmelCase ): exec(_UpperCAmelCase , _UpperCAmelCase ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. _UpperCAmelCase = rmtree _UpperCAmelCase = rmdir _UpperCAmelCase = chdir @contextlib.contextmanager def A ( _UpperCAmelCase : Union[str, Any] ) -> Any: '''simple docstring''' def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase ) signal.signal(signal.SIGALRM , _UpperCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCAmelCase ): with contextlib.redirect_stderr(_UpperCAmelCase ): with redirect_stdin(_UpperCAmelCase ): yield @contextlib.contextmanager def A ( ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCAmelCase ): yield dirname class __lowerCAmelCase ( A ): pass class __lowerCAmelCase ( io.StringIO ): def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any: """simple docstring""" raise OSError def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]: """simple docstring""" raise OSError def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" raise OSError def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]: """simple docstring""" return False class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore UpperCamelCase = '''stdin''' @contextlib.contextmanager def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' if root == ".": yield return _UpperCAmelCase = os.getcwd() os.chdir(_UpperCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str]=None ) -> Any: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _UpperCAmelCase = None _UpperCAmelCase = None import os _UpperCAmelCase = '1' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import shutil _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import subprocess _UpperCAmelCase = None # type: ignore _UpperCAmelCase = None import sys _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[int] ) -> Tuple: lowercase__ : Optional[Any] = inspect.getfile(accelerate.test_utils ) lowercase__ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) lowercase__ : Any = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) lowercase__ : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def __UpperCamelCase ( self : Tuple ) -> List[str]: print(F'''Found {torch.cuda.device_count()} devices.''' ) lowercase__ : str = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: print(F'''Found {torch.cuda.device_count()} devices.''' ) lowercase__ : str = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase ( self : List[Any] ) -> Dict: lowercase__ : Dict = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase ( self : Any ) -> Tuple: print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) lowercase__ : Any = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) if __name__ == "__main__": UpperCamelCase = Accelerator() UpperCamelCase = (accelerator.state.process_index + 2, 10) UpperCamelCase = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase = '''''' UpperCamelCase = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
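For readers without multiple GPUs, the zero-padding semantics checked in the `__main__` block above can be emulated on a single device. This is an illustrative sketch of the expected behaviour, not a call into the accelerate API:

import torch


def pad_to_length(tensor: torch.Tensor, length: int, pad_first: bool = False) -> torch.Tensor:
    # Pad dim 0 with zeros, either before or after the original values.
    pad = torch.zeros(length - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)


a = torch.randint(0, 10, (2, 10))  # stand-in for process 0's tensor
b = torch.randint(0, 10, (3, 10))  # stand-in for process 1's tensor
longest = max(a.shape[0], b.shape[0])
padded = [pad_to_length(t, longest) for t in (a, b)]
assert all(t.shape[0] == longest for t in padded)
assert torch.equal(padded[0][:2], a)  # original values are preserved
assert torch.all(padded[0][2:] == 0)  # padding is done with the value 0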
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
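The `tee` helper above streams a child process's output live while also capturing it. A compact, standalone version of the same pattern (illustrative names, standard library only):

import asyncio
import sys


async def stream_and_capture(cmd):
    p = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    captured = []

    async def read(stream, label):
        # Forward each line as it arrives and keep a copy for the caller.
        while True:
            line = await stream.readline()
            if not line:
                break
            text = line.decode("utf-8").rstrip()
            captured.append(text)
            print(label, text)

    await asyncio.gather(read(p.stdout, "stdout:"), read(p.stderr, "stderr:"))
    return await p.wait(), captured


if __name__ == "__main__":
    code, lines = asyncio.run(stream_and_capture([sys.executable, "-c", "print('hello')"]))
    assert code == 0 and lines == ["hello"]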
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = CodeGenTokenizer A = CodeGenTokenizerFast A = True A = {"add_prefix_space": True} A = False def a_ (self ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase : int = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] __UpperCamelCase : Optional[Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __UpperCamelCase : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __UpperCamelCase : List[Any] = {"unk_token": "<unk>"} __UpperCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) def a_ (self , **_UpperCAmelCase ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a_ (self , **_UpperCAmelCase ) -> Any: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a_ (self , _UpperCAmelCase ) -> List[Any]: __UpperCamelCase : Union[str, Any] = "lower newer" __UpperCamelCase : Any = "lower newer" return input_text, output_text def a_ (self ) -> Tuple: __UpperCamelCase : Union[str, Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCamelCase : int = "lower newer" __UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] __UpperCamelCase : Any = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Dict = tokens + [tokenizer.unk_token] __UpperCamelCase : Union[str, Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def a_ (self ) -> Dict: if not self.test_rust_tokenizer: return __UpperCamelCase : Any = self.get_tokenizer() __UpperCamelCase : Dict = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) __UpperCamelCase : Any = "lower newer" # Testing tokenization __UpperCamelCase : List[str] = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids without special tokens __UpperCamelCase : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids with 
special tokens __UpperCamelCase : str = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) __UpperCamelCase : List[Any] = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) __UpperCamelCase : List[Any] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing the unknown token __UpperCamelCase : Dict = tokens + [rust_tokenizer.unk_token] __UpperCamelCase : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any: pass def a_ (self , _UpperCAmelCase=1_5 ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): __UpperCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # Simple input __UpperCamelCase : Union[str, Any] = "This is a simple input" __UpperCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"] __UpperCamelCase : List[Any] = ("This is a simple input", "This is a pair") __UpperCamelCase : Optional[Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" , ) def a_ (self ) -> List[str]: __UpperCamelCase : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input __UpperCamelCase : Optional[int] = "This is a simple input" __UpperCamelCase : List[Any] = ["This is a simple input looooooooong", "This is a simple input"] __UpperCamelCase : Dict = ("This is a simple input", "This is a pair") __UpperCamelCase : List[str] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] __UpperCamelCase : Tuple = tokenizer.pad_token_id __UpperCamelCase : Union[str, Any] = tokenizer(_UpperCAmelCase , padding="max_length" , max_length=3_0 , return_tensors="np" ) __UpperCamelCase : Union[str, Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="np" ) __UpperCamelCase : Optional[Any] = tokenizer(*_UpperCAmelCase , padding="max_length" , max_length=6_0 , return_tensors="np" ) __UpperCamelCase : Optional[Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic 
padding self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def a_ (self ) -> str: __UpperCamelCase : Dict = "$$$" __UpperCamelCase : Union[str, Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = "This is a simple input" __UpperCamelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"] __UpperCamelCase : Dict = tokenizer.bos_token_id __UpperCamelCase : List[Any] = tokenizer(_UpperCAmelCase ) __UpperCamelCase : Dict = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __UpperCamelCase : Dict = tokenizer.decode(out_s.input_ids ) __UpperCamelCase : List[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def a_ (self ) -> List[str]: __UpperCamelCase : List[str] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" ) __UpperCamelCase : Dict = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#" __UpperCamelCase : List[str] = "\nif len_a > len_b: result = a\nelse: result = b" __UpperCamelCase : Union[str, Any] = tokenizer.encode(_UpperCAmelCase ) __UpperCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^\'\'\'", "^\"\"\"", "\n\n\n"] __UpperCamelCase : int = tokenizer.decode(_UpperCAmelCase , truncate_before_pattern=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def a_ (self ) -> List[str]: pass
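The `truncate_before_pattern` behaviour exercised in the slow test above cuts decoded text at the earliest match of any stop pattern. A standalone sketch of that post-processing step (an illustration, not CodeGen's actual implementation):

import re


def truncate_before_pattern(text: str, patterns: list[str]) -> str:
    # Cut the text at the earliest match of any stop pattern.
    positions = []
    for pattern in patterns:
        match = re.search(pattern, text, flags=re.MULTILINE)
        if match:
            positions.append(match.start())
    return text[: min(positions)] if positions else text


completion = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n# trailing"
print(truncate_before_pattern(completion, ["^#", "\n\n\n"]))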
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that digit n does not already occur in the row, column,
    or 3x3 box of grid[row][column]."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return it once solved,
    or None if no digit fits."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and try the next digit

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
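Instantiating the config is enough to sanity-check the defaults above (illustrative):

config = BertGenerationConfig()
assert config.model_type == "bert-generation"
assert config.hidden_size == 1024 and config.num_attention_heads == 16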
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
"""Project Euler problem 47: https://projecteuler.net/problem=47
Find the first of four consecutive integers that each have four
distinct prime factors."""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all items of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers that each have
    exactly n unique prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        checker = [upf_len(x) for x in group]

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive
    integers with n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
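A grounded spot check from the Project Euler 47 statement: 644, 645, 646 is the first run of three consecutive integers with three distinct prime factors each, so `solution(3)` should return 644.

assert unique_prime_factors(644) == {2, 7, 23}
assert unique_prime_factors(645) == {3, 5, 43}
assert unique_prime_factors(646) == {2, 17, 19}
assert solution(3) == 644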
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __magic_name__ = pytest.mark.integration __magic_name__ = {"comet"} __magic_name__ = importlib.util.find_spec("fairseq") is not None __magic_name__ = {"code_eval"} __magic_name__ = os.name == "nt" __magic_name__ = {"bertscore", "frugalscore", "perplexity"} __magic_name__ = importlib.util.find_spec("transformers") is not None def _lowerCAmelCase ( UpperCamelCase_ ): @wraps(_UpperCAmelCase ) def wrapper(self , UpperCamelCase_ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , _UpperCAmelCase ) return wrapper def _lowerCAmelCase ( UpperCamelCase_ ): @wraps(_UpperCAmelCase ) def wrapper(self , UpperCamelCase_ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , _UpperCAmelCase ) return wrapper def _lowerCAmelCase ( UpperCamelCase_ ): @wraps(_UpperCAmelCase ) def wrapper(self , UpperCamelCase_ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , _UpperCAmelCase ) return wrapper def _lowerCAmelCase ( ): __SCREAMING_SNAKE_CASE = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __a , __a , __a ) @local class SCREAMING_SNAKE_CASE_ ( parameterized.TestCase ): """simple docstring""" __lowercase : Dict = {} __lowercase : Optional[Any] = None @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""") @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""") def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = """[...]""" __SCREAMING_SNAKE_CASE = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , lowerCAmelCase__)).module_path) __SCREAMING_SNAKE_CASE = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase__) # check parameters __SCREAMING_SNAKE_CASE = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCAmelCase__ , metric_module.__name__): with self.use_local_metrics(): try: __SCREAMING_SNAKE_CASE = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @slow def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = """[...]""" __SCREAMING_SNAKE_CASE = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , lowerCAmelCase__)).module_path) # run doctest with self.use_local_metrics(): __SCREAMING_SNAKE_CASE = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__) 
self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @contextmanager def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase__): yield else: yield @contextmanager def snake_case_ ( self): def load_local_metric(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__): return load_metric(os.path.join("""metrics""" , lowerCAmelCase__) , *lowerCAmelCase__ , **lowerCAmelCase__) with patch("""datasets.load_metric""") as mock_load_metric: __SCREAMING_SNAKE_CASE = load_local_metric yield @classmethod def snake_case_ ( cls , lowerCAmelCase__): def wrapper(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = contextmanager(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def _lowerCAmelCase ( UpperCamelCase_ ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" def snake_case_ ( self , lowerCAmelCase__): assert len(input_dict["""input_ids"""]) == 2 return np.array([1.03, 1.04]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: __SCREAMING_SNAKE_CASE = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def _lowerCAmelCase ( UpperCamelCase_ ): import torch def bert_cos_score_idf(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCAmelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: __SCREAMING_SNAKE_CASE = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def _lowerCAmelCase ( UpperCamelCase_ ): def load_from_checkpoint(UpperCamelCase_ ): class SCREAMING_SNAKE_CASE_ : """simple docstring""" def snake_case_ ( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__): assert len(lowerCAmelCase__) == 2 __SCREAMING_SNAKE_CASE = [0.19, 0.92] return scores, sum(lowerCAmelCase__) / len(lowerCAmelCase__) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: __SCREAMING_SNAKE_CASE = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: __SCREAMING_SNAKE_CASE = load_from_checkpoint yield def _lowerCAmelCase ( ): __SCREAMING_SNAKE_CASE = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) __SCREAMING_SNAKE_CASE = """ERROR""" __SCREAMING_SNAKE_CASE = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ): metric.compute(predictions=[] , references=[] , scheme=_UpperCAmelCase )
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ): UpperCamelCase = None UpperCamelCase = None class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ): UpperCamelCase = datasets.Audio() UpperCamelCase = '''audio''' UpperCamelCase = AudioFolderConfig UpperCamelCase = 42 # definition at the bottom of the script UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' ) UpperCAmelCase__ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] UpperCAmelCase__ = AUDIO_EXTENSIONS
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase : Optional[int] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = ['''MLukeTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys __lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
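The `_LazyModule` indirection above defers the sentencepiece-backed import until the tokenizer is actually accessed. A minimal illustration of the same idea using module-level `__getattr__` (PEP 562); this is a sketch of the pattern, not transformers' implementation, and would live in a package's `__init__.py`:

import importlib

_import_structure = {"tokenization_mluke": ["MLukeTokenizer"]}


def __getattr__(name):
    # Import the submodule only when one of its names is first requested.
    for module, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")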
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
'''simple docstring''' from __future__ import annotations from collections.abc import Callable UpperCAmelCase_ : Dict = list[list[float | int]] def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = len(_UpperCAmelCase ) _SCREAMING_SNAKE_CASE : Any = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] _SCREAMING_SNAKE_CASE : int = 42 _SCREAMING_SNAKE_CASE : List[str] = 42 _SCREAMING_SNAKE_CASE : Any = 42 _SCREAMING_SNAKE_CASE : Optional[int] = 42 _SCREAMING_SNAKE_CASE : Any = 42 _SCREAMING_SNAKE_CASE : Any = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = matrix[row][col] _SCREAMING_SNAKE_CASE : Tuple = vector[row][0] _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 _SCREAMING_SNAKE_CASE : Dict = 0 while row < size and col < size: # pivoting _SCREAMING_SNAKE_CASE : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): _SCREAMING_SNAKE_CASE : Tuple = augmented[rowa][col] / augmented[row][col] _SCREAMING_SNAKE_CASE : Any = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): _SCREAMING_SNAKE_CASE : Optional[int] = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = len(_UpperCAmelCase ) _SCREAMING_SNAKE_CASE : List[Any] = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] _SCREAMING_SNAKE_CASE : Tuple = [[0] for _ in range(_UpperCAmelCase )] _SCREAMING_SNAKE_CASE : Optional[int] = 42 _SCREAMING_SNAKE_CASE : List[str] = 42 _SCREAMING_SNAKE_CASE : Optional[int] = 42 _SCREAMING_SNAKE_CASE : List[str] = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = (x_val + 1) ** (size - col - 1) _SCREAMING_SNAKE_CASE : Tuple = y_val _SCREAMING_SNAKE_CASE : List[str] = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(SCREAMING_SNAKE_CASE__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def snake_case_ ( SCREAMING_SNAKE_CASE__ = question_function , SCREAMING_SNAKE_CASE__ = 10 ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] _SCREAMING_SNAKE_CASE : Dict = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _SCREAMING_SNAKE_CASE : int = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 42 _SCREAMING_SNAKE_CASE : int = 42 for poly in polynomials: _SCREAMING_SNAKE_CASE : int = 1 while 
func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(F"{solution() = }")
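As a grounded hand check from the Project Euler 101 statement, for u(n) = n**3 (values 1, 8, 27, 64, ...) the optimum polynomials of orders 1 to 3 produce first incorrect terms 1, 15, and 58, which sum to 74:

# OP(1, n) = 1; OP(2, n) = 7n - 6; OP(3, n) = 6n^2 - 11n + 6
for order, op in [(1, lambda n: 1), (2, lambda n: 7 * n - 6), (3, lambda n: 6 * n**2 - 11 * n + 6)]:
    n = 1
    while op(n) == n**3:
        n += 1
    print(order, op(n))  # prints 1, 15, 58 in turn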
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __A ( a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Tuple =RoCBertTokenizer UpperCamelCase__ : List[str] =None UpperCamelCase__ : Optional[int] =False UpperCamelCase__ : Optional[int] =True UpperCamelCase__ : Union[str, Any] =filter_non_english def __lowercase ( self ): """simple docstring""" super().setUp() __UpperCamelCase : Optional[Any] =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd'] __UpperCamelCase : List[Any] ={} __UpperCamelCase : Any ={} for i, value in enumerate(lowerCamelCase__ ): __UpperCamelCase : List[Any] =i __UpperCamelCase : Optional[int] =i __UpperCamelCase : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] ) __UpperCamelCase : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer: json.dump(lowerCamelCase__ , lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer: json.dump(lowerCamelCase__ , lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __UpperCamelCase : Tuple =tokenizer.tokenize('你好[SEP]你是谁' ) self.assertListEqual(lowerCamelCase__ , ['你', '好', '[SEP]', '你', '是', '谁'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCamelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase__ ) , [5, 6, 2, 5, 7, 8] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Dict =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : str =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __UpperCamelCase : Any ={} for i, token in enumerate(lowerCamelCase__ ): __UpperCamelCase : Any =i __UpperCamelCase : Optional[int] =RoCBertWordpieceTokenizer(vocab=lowerCamelCase__ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def __lowercase ( self ): """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def __lowercase ( self ): """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def __lowercase ( self ): """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' 
) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCamelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) if self.test_rust_tokenizer: __UpperCamelCase : Optional[int] =self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(lowerCamelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) def __lowercase ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) __UpperCamelCase : List[Any] =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' __UpperCamelCase : str =tokenizer_r.encode_plus( lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , ) __UpperCamelCase : int =tokenizer_r.do_lower_case if hasattr(lowerCamelCase__ , 'do_lower_case' ) else False __UpperCamelCase : List[Any] =( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Dict =['的', '人', '有'] __UpperCamelCase : List[Any] =''.join(lowerCamelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __UpperCamelCase : Dict =True __UpperCamelCase : List[str] =self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) __UpperCamelCase : List[Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : int =tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : List[str] =tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ ) __UpperCamelCase : List[str] =tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) __UpperCamelCase : Optional[int] =False __UpperCamelCase : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) __UpperCamelCase : Tuple =self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) __UpperCamelCase : List[str] =tokenizer_r.encode(lowerCamelCase__ 
, add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : List[str] =tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : Dict =tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ ) __UpperCamelCase : int =tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". __UpperCamelCase : str =[ f'##{token}' if idx != 0 else token for idx, token in enumerate(lowerCamelCase__ ) ] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __UpperCamelCase : Optional[int] =tokenizer.encode('你好' , add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =tokenizer.encode('你是谁' , add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : Dict =tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ) __UpperCamelCase : Tuple =tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): __UpperCamelCase : Dict ='你好,你是谁' __UpperCamelCase : str =tokenizer.tokenize(lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =tokenizer.convert_tokens_to_shape_ids(lowerCamelCase__ ) __UpperCamelCase : Dict =tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =tokenizer.prepare_for_model( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) __UpperCamelCase : Any =tokenizer.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase = 50 ): __lowercase : Optional[Any] = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class SCREAMING_SNAKE_CASE__ ( _a , _a , unittest.TestCase ): _a = IFPipeline _a = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _a = TEXT_TO_IMAGE_BATCH_PARAMS _a = PipelineTesterMixin.required_optional_params - {'latents'} def __lowercase ( self : Optional[int] ): return self._get_dummy_components() def __lowercase ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : str=0 ): if str(lowerCAmelCase ).startswith("""mps""" ): lowerCAmelCase = torch.manual_seed(lowerCAmelCase ) else: lowerCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __lowercase ( self : Union[str, Any] ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def __lowercase ( self : List[str] ): super().test_save_load_floataa(expected_max_diff=1e-1 ) def __lowercase ( self : List[Any] ): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __lowercase ( self : Tuple ): self._test_save_load_local() def __lowercase ( self : List[str] ): self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowercase ( self : Optional[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowercase ( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): lowerCAmelCase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) lowerCAmelCase = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) lowerCAmelCase , lowerCAmelCase = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowerCAmelCase = None lowerCAmelCase = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowerCAmelCase = IFImgaImgPipeline(**pipe_a.components ) lowerCAmelCase = 
IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting lowerCAmelCase = IFInpaintingPipeline(**pipe_a.components ) lowerCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def __lowercase ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] ): _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase = pipe_a( prompt_embeds=lowerCAmelCase , negative_prompt_embeds=lowerCAmelCase , num_inference_steps=2 , generator=lowerCAmelCase , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (64, 64, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) # pipeline 2 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = pipe_a( prompt_embeds=lowerCAmelCase , negative_prompt_embeds=lowerCAmelCase , image=lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (256, 256, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) def __lowercase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple ): _start_torch_memory_measurement() lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase = pipe_a( prompt_embeds=lowerCAmelCase , negative_prompt_embeds=lowerCAmelCase , image=lowerCAmelCase , num_inference_steps=2 , generator=lowerCAmelCase , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (64, 64, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) # pipeline 2 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = pipe_a( 
prompt_embeds=lowerCAmelCase , negative_prompt_embeds=lowerCAmelCase , image=lowerCAmelCase , original_image=lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (256, 256, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) def __lowercase ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple ): _start_torch_memory_measurement() lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase ) lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase = pipe_a( prompt_embeds=lowerCAmelCase , negative_prompt_embeds=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , num_inference_steps=2 , generator=lowerCAmelCase , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (64, 64, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) # pipeline 2 _start_torch_memory_measurement() lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase ) lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase ) lowerCAmelCase = pipe_a( prompt_embeds=lowerCAmelCase , negative_prompt_embeds=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , original_image=lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="""np""" , ) lowerCAmelCase = output.images[0] assert image.shape == (256, 256, 3) lowerCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) def lowercase () -> List[str]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below is preserved from the original source
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self) -> None:
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
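A brief usage sketch, added for illustration; the numbers are my own worked example, not from the original module. For 100 V at 0 degrees and 5 A at -30 degrees, the apparent power comes out to roughly 433 - 250j volt-amperes.

print(apparent_power(100, 5, 0, -30))  # approximately (433.0127-250j)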
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    # Find the greatest product of thirteen adjacent digits in the digit string n.
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
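An illustrative driver, added as a sketch; the transition table and step count are my own example, not from the original module. With a two-state chain that stays in 'a' 90% of the time, the visit counts should settle near the stationary distribution, roughly 5:1 in favour of 'a'.

transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
print(get_transitions("a", transitions, 5000))  # e.g. Counter({'a': ~4160, 'b': ~840}); exact counts vary per run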
from __future__ import annotations from collections.abc import Callable UpperCAmelCase__ = list[list[float | int]] def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for row in range(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = matrix[row][col] _UpperCAmelCase = vector[row][0] _UpperCAmelCase = 0 _UpperCAmelCase = 0 while row < size and col < size: # pivoting _UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _UpperCAmelCase ): _UpperCAmelCase = augmented[rowa][col] / augmented[row][col] _UpperCAmelCase = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _UpperCAmelCase ): for row in range(_UpperCAmelCase ): _UpperCAmelCase = augmented[row][col] / augmented[col][col] for cola in range(_UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase ) ] def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]: '''simple docstring''' _UpperCAmelCase = len(_UpperCAmelCase ) _UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )] _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for x_val, y_val in enumerate(_UpperCAmelCase ): for col in range(_UpperCAmelCase ): _UpperCAmelCase = (x_val + 1) ** (size - col - 1) _UpperCAmelCase = y_val _UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase ) def interpolated_func(_UpperCAmelCase : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_UpperCAmelCase ) ) return interpolated_func def A ( _UpperCAmelCase : int ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int: '''simple docstring''' _UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )] _UpperCAmelCase = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _UpperCAmelCase = 0 _UpperCAmelCase = 42 _UpperCAmelCase = 42 for poly in polynomials: _UpperCAmelCase = 1 while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ): x_val += 1 ret += poly(_UpperCAmelCase ) return ret if __name__ == "__main__": print(f"""{solution() = }""")
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ : Optional[int] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' lowerCAmelCase_ : int = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' lowerCAmelCase_ : Union[str, Any] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE (datasets.Metric ): """simple docstring""" def UpperCamelCase__ ( self : Any ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def UpperCamelCase__ ( self : List[Any] ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def UpperCamelCase__ ( self : str , __a : Optional[int] , __a : List[str] , __a : str=None , __a : List[str]="uniform_average" , __a : Tuple=True ): _a = mean_squared_error( __a , __a , sample_weight=__a , multioutput=__a , squared=__a ) return {"mse": mse}
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
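Two quick calls, added for illustration, showing the intended behaviour:

print(all_unique([1, 2, 3, 4]))  # True
print(all_unique([1, 2, 2, 4]))  # False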
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ): __SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 1_8, """width""": 1_8} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std def snake_case_ ( self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ): """simple docstring""" __lowercase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def snake_case_ ( self): __SCREAMING_SNAKE_CASE = EfficientFormerImageProcessorTester(self) @property def snake_case_ ( self): return self.image_proc_tester.prepare_image_processor_dict() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""")) self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""")) self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""")) self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""")) self.assertTrue(hasattr(lowerCAmelCase__ , """size""")) def snake_case_ ( self): pass def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image) # Test not batched input __SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__) for image in image_inputs: 
self.assertIsInstance(lowerCAmelCase__ , np.ndarray) # Test not batched input __SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor) # Test not batched input __SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
339
0
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths are intentionally left blank; fill them in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
219
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
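# For reference, how `requests` folds the params dict into the query string;
# this dry run builds the URL without hitting Google Scholar.
import requests

prepared = requests.Request(
    "GET", "https://scholar.google.com/scholar_lookup", params={"year": 2018, "hl": "en"}
).prepare()
assert prepared.url == "https://scholar.google.com/scholar_lookup?year=2018&hl=en"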
339
0
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
200
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
339
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
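# Illustration of what the lazy pattern above buys: importing the package is
# cheap, and the torch-backed classes are only materialized on first attribute
# access through the `_LazyModule` placed into `sys.modules`.
# (Assumes a transformers installation that ships the falcon subpackage.)
from transformers.models import falcon

config = falcon.FalconConfig()  # attribute access triggers the real submodule import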
71
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
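# Worked check: over {O(n time), L(ate), A(bsent)} there are 3**4 = 81 strings
# of length 4, of which 43 earn a prize (the figure quoted by Project Euler 191);
# the 30-day count is the puzzle's answer.
assert solution(4) == 43
assert solution(30) == 1918080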
339
0
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """Return the zero-based index of the rightmost set bit of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # number & -number isolates the lowest set bit; log2 turns it into an index
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
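# The trick in one line: in two's complement, n & -n isolates the lowest set
# bit, and log2 converts that power of two into a bit index.
n = 36                                       # 0b100100
assert n & -n == 4                           # 0b000100
assert get_index_of_rightmost_set_bit(36) == 2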
47
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } a_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[Any] = {} with open(_UpperCAmelCase , '''r''' ) as file: for line_number, line in enumerate(_UpperCAmelCase ): __lowercase : List[str] = line.strip() if line: __lowercase : Union[str, Any] = line.split() __lowercase : Tuple = line_number __lowercase : Tuple = words[0] __lowercase : Any = value return result def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split('''.''' ): __lowercase : Optional[int] = getattr(_UpperCAmelCase , _UpperCAmelCase ) __lowercase : Dict = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_UpperCAmelCase ): __lowercase : Union[str, Any] = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowercase : Dict = '''param''' if weight_type is not None and weight_type != "param": __lowercase : Union[str, Any] = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape elif weight_type is not None and weight_type == "param": __lowercase : Tuple = hf_pointer for attribute in hf_param_name.split('''.''' ): __lowercase : Any = getattr(_UpperCAmelCase , _UpperCAmelCase ) __lowercase : List[Any] = shape_pointer.shape # let's reduce dimension __lowercase : Tuple = value[0] else: __lowercase : Tuple = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase : Dict = value elif weight_type == "weight_g": __lowercase : Union[str, Any] = value elif weight_type == "weight_v": __lowercase : List[Any] = value elif weight_type == "bias": __lowercase : Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __lowercase : Union[str, Any] = getattr(_UpperCAmelCase , _UpperCAmelCase ) __lowercase : List[str] = value else: __lowercase : List[Any] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_UpperCAmelCase ): __lowercase : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowercase : Any = '''param''' if weight_type is not None and weight_type != "param": __lowercase : List[str] = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowercase : Optional[int] = '''.'''.join([key, hf_param_name] ) else: __lowercase : Any = key __lowercase : str = value if '''lm_head''' in full_key else value[0] a_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None ): __lowercase : List[Any] = False for key, mapped_key in MAPPING.items(): __lowercase : Union[str, Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowercase : List[Any] = True if "*" in mapped_key: __lowercase : int = name.split(_UpperCAmelCase )[0].split('''.''' )[-2] __lowercase : Optional[Any] = mapped_key.replace('''*''' , _UpperCAmelCase ) if "weight_g" in name: __lowercase : int = '''weight_g''' elif "weight_v" in name: __lowercase : Dict = '''weight_v''' elif "bias" in name: __lowercase : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase : Optional[int] = '''weight''' else: __lowercase : List[Any] = None if hf_dict is not None: rename_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return is_used return is_used def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = [] __lowercase : Tuple = fairseq_model.state_dict() __lowercase : Tuple = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowercase : List[str] = False if "conv_layers" in name: load_conv_layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase : str = True else: __lowercase : Optional[int] = load_wavaveca_layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not is_used: unused_weights.append(_UpperCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = 
full_name.split('''conv_layers.''' )[-1] __lowercase : Optional[Any] = name.split('''.''' ) __lowercase : Optional[Any] = int(items[0] ) __lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase : List[Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowercase : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase : Optional[int] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_UpperCAmelCase ) @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=False ): if config_path is not None: __lowercase : List[Any] = WavaVecaConfig.from_pretrained(_UpperCAmelCase ) else: __lowercase : List[str] = WavaVecaConfig() if is_seq_class: __lowercase : Optional[int] = read_txt_into_dict(_UpperCAmelCase ) __lowercase : int = idalabel __lowercase : Union[str, Any] = WavaVecaForSequenceClassification(_UpperCAmelCase ) __lowercase : Tuple = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ) feature_extractor.save_pretrained(_UpperCAmelCase ) elif is_finetuned: if dict_path: __lowercase : Optional[Any] = Dictionary.load(_UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : List[str] = target_dict.pad_index __lowercase : Optional[int] = target_dict.bos_index __lowercase : List[str] = target_dict.eos_index __lowercase : List[str] = len(target_dict.symbols ) __lowercase : Tuple = os.path.join(_UpperCAmelCase , '''vocab.json''' ) if not os.path.isdir(_UpperCAmelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCAmelCase ) ) return os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) __lowercase : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __lowercase : List[Any] = 0 __lowercase : List[str] = 1 with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: 
json.dump(_UpperCAmelCase , _UpperCAmelCase ) __lowercase : Any = WavaVecaCTCTokenizer( _UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_UpperCAmelCase , ) __lowercase : str = True if config.feat_extract_norm == '''layer''' else False __lowercase : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ) __lowercase : int = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) __lowercase : Any = WavaVecaForCTC(_UpperCAmelCase ) else: __lowercase : Optional[Any] = WavaVecaForPreTraining(_UpperCAmelCase ) if is_finetuned or is_seq_class: __lowercase ,__lowercase ,__lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowercase : int = argparse.Namespace(task='''audio_pretraining''' ) __lowercase : Dict = fairseq.tasks.setup_task(_UpperCAmelCase ) __lowercase ,__lowercase ,__lowercase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCAmelCase ) __lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) a_ = parser.parse_args() a_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
249
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
0
"""simple docstring""" def lowercase () -> Dict: '''simple docstring''' lowerCAmelCase = 0 for i in range(1 , 1_001 ): total += i**i return str(_UpperCAmelCase )[-10:] if __name__ == "__main__": print(solution())
155
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run a candidate program in a subprocess and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions (fork bombs, killing other processes,
    removing files, ...) so generated code cannot interfere with the test.
    This is a safety net, not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
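# Minimal usage sketch of check_correctness as defined above; the program
# string already contains both the completion and its test.
# (POSIX only: time_limit relies on SIGALRM.)
program = "def add(a, b):\n    return a + b\nassert add(1, 2) == 3\n"
outcome = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
assert outcome["passed"] and outcome["result"] == "passed"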
339
0
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : Union[str, Any] = StableDiffusionDiffEditPipeline __A : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} __A : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} __A : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __A : Any = frozenset([] ) def __UpperCamelCase ( self : List[str] ) -> Optional[Any]: torch.manual_seed(0 ) lowercase__ : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , ) lowercase__ : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) lowercase__ : str = DDIMInverseScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_zero=lowercase_ , ) torch.manual_seed(0 ) lowercase__ : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) lowercase__ : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) lowercase__ : str = CLIPTextModel(lowercase_ ) lowercase__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ : List[str] = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __UpperCamelCase ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple=0 ) -> str: lowercase__ : Union[str, Any] = floats_tensor((1, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowercase__ : Optional[int] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) if str(lowercase_ ).startswith("mps" ): lowercase__ : List[Any] = torch.manual_seed(lowercase_ ) else: lowercase__ : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : Optional[int] = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, 
"generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str]=0 ) -> str: lowercase__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowercase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : List[str] = Image.fromarray(np.uinta(lowercase_ ) ).convert("RGB" ) if str(lowercase_ ).startswith("mps" ): lowercase__ : Optional[Any] = torch.manual_seed(lowercase_ ) else: lowercase__ : int = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : Optional[Any] = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __UpperCamelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : str=0 ) -> int: lowercase__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) lowercase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : List[Any] = Image.fromarray(np.uinta(lowercase_ ) ).convert("RGB" ) if str(lowercase_ ).startswith("mps" ): lowercase__ : List[Any] = torch.manual_seed(lowercase_ ) else: lowercase__ : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : int = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def __UpperCamelCase ( self : Optional[int] ) -> int: if not hasattr(self.pipeline_class , "_optional_components" ): return lowercase__ : Tuple = self.get_dummy_components() lowercase__ : Optional[int] = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(lowercase_ , lowercase_ , lowercase_ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowercase__ : List[str] = self.get_dummy_inputs(lowercase_ ) lowercase__ : Optional[int] = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) lowercase__ : Tuple = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(lowercase_ , lowercase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) lowercase__ : str = self.get_dummy_inputs(lowercase_ ) lowercase__ : Tuple = pipe_loaded(**lowercase_ )[0] lowercase__ : List[Any] = np.abs(output - output_loaded ).max() self.assertLess(lowercase_ , 1E-4 ) def __UpperCamelCase ( self : List[str] ) -> Optional[Any]: lowercase__ : List[Any] = "cpu" lowercase__ : Any = self.get_dummy_components() lowercase__ : Union[str, Any] = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : List[Any] = self.get_dummy_mask_inputs(lowercase_ ) lowercase__ : int = pipe.generate_mask(**lowercase_ ) lowercase__ : int = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) 
lowercase__ : List[str] = np.array([0] * 9 ) lowercase__ : str = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def __UpperCamelCase ( self : str ) -> str: lowercase__ : Union[str, Any] = "cpu" lowercase__ : Dict = self.get_dummy_components() lowercase__ : List[Any] = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : List[str] = self.get_dummy_inversion_inputs(lowercase_ ) lowercase__ : Optional[int] = pipe.invert(**lowercase_ ).images lowercase__ : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase__ : Union[str, Any] = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) lowercase__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1E-3 ) def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def __UpperCamelCase ( self : Dict ) -> Dict: lowercase__ : Tuple = "cpu" lowercase__ : List[str] = self.get_dummy_components() lowercase__ : Dict = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"} lowercase__ : Optional[int] = DPMSolverMultistepScheduler(**lowercase_ ) lowercase__ : Optional[int] = DPMSolverMultistepInverseScheduler(**lowercase_ ) lowercase__ : Tuple = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : Optional[Any] = self.get_dummy_inversion_inputs(lowercase_ ) lowercase__ : Tuple = pipe.invert(**lowercase_ ).images lowercase__ : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowercase__ : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) lowercase__ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1E-3 ) @require_torch_gpu @slow class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : str ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def __UpperCamelCase ( cls : int ) -> List[Any]: lowercase__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) lowercase__ : Any = raw_image.convert("RGB" ).resize((7_68, 7_68) ) lowercase__ : List[Any] = raw_image def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ : str = torch.manual_seed(0 ) lowercase__ : Optional[Any] = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=lowercase_ , torch_dtype=torch.floataa ) lowercase__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) lowercase__ : List[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : Optional[Any] = "a bowl of fruit" lowercase__ : Tuple = "a bowl of pears" lowercase__ : List[str] = pipe.generate_mask( image=self.raw_image , source_prompt=lowercase_ , target_prompt=lowercase_ , generator=lowercase_ , ) lowercase__ : Optional[int] = pipe.invert( prompt=lowercase_ , image=self.raw_image , inpaint_strength=0.7 , generator=lowercase_ ).latents lowercase__ : Any = pipe( prompt=lowercase_ , mask_image=lowercase_ , image_latents=lowercase_ , generator=lowercase_ 
, negative_prompt=lowercase_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0] lowercase__ : Dict = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5E-1 def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: lowercase__ : Any = torch.manual_seed(0 ) lowercase__ : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1" , safety_checker=lowercase_ , torch_dtype=torch.floataa ) lowercase__ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase__ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : Dict = "a bowl of fruit" lowercase__ : Tuple = "a bowl of pears" lowercase__ : str = pipe.generate_mask( image=self.raw_image , source_prompt=lowercase_ , target_prompt=lowercase_ , generator=lowercase_ , ) lowercase__ : List[str] = pipe.invert( prompt=lowercase_ , image=self.raw_image , inpaint_strength=0.7 , generator=lowercase_ , num_inference_steps=25 , ).latents lowercase__ : Optional[Any] = pipe( prompt=lowercase_ , mask_image=lowercase_ , image_latents=lowercase_ , generator=lowercase_ , negative_prompt=lowercase_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0] lowercase__ : str = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5E-1
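# --- Illustrative sketch (not part of the test file above) -------------------
# The slow tests above drive DiffEdit's three-stage flow end to end. This is a
# hedged usage sketch of that same flow under the tests' own assumptions (a
# CUDA device, network access, and the "stabilityai/stable-diffusion-2-1"
# checkpoint); it is not code from the test suite itself.
import torch

from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

# Stage 1: contrast source and target prompts to obtain a binary edit mask.
mask_image = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
)
# Stage 2: DDIM-invert the image into latents the pipeline can edit.
image_latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
# Stage 3: inpaint only inside the mask, steering toward the target prompt.
edited = pipe(
    prompt="a bowl of pears",
    mask_image=mask_image,
    image_latents=image_latents,
    negative_prompt="a bowl of fruit",
    inpaint_strength=0.7,
).images[0]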
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
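# --- Illustrative usage (not part of the module above) -----------------------
# How the helpers above are consumed from a test file. The decorator/class
# names used here are the un-mangled ones this module is known by; the import
# path `accelerate.test_utils.testing` is an assumption.
import sys
import unittest

from accelerate.test_utils.testing import TempDirTestCase, require_cuda, run_command, slow


class LaunchSmokeTest(TempDirTestCase):
    @slow  # only runs when RUN_SLOW=yes is exported, per parse_flag_from_env above
    @require_cuda
    def test_cli_roundtrip(self):
        # run_command raises SubprocessCallException with the captured output on failure
        run_command([sys.executable, "-c", "import accelerate; print(accelerate.__version__)"])


if __name__ == "__main__":
    unittest.main()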
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Optional[int] = filter(lambda snake_case__ : p.requires_grad , model.parameters() ) __UpperCamelCase : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] ) return params _lowerCAmelCase = logging.getLogger(__name__) def __lowerCAmelCase ( snake_case__ , snake_case__ ): if metric == "rouge2": __UpperCamelCase : Optional[Any] = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __UpperCamelCase : int = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __UpperCamelCase : Optional[Any] = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": __UpperCamelCase : List[Any] = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) __UpperCamelCase : int = ModelCheckpoint( dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode="max" , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def __lowerCAmelCase ( snake_case__ , snake_case__ ): return EarlyStopping( monitor=F"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , ) class A ( pl.Callback ): '''simple docstring''' def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> str: __UpperCamelCase : Optional[int] = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_UpperCAmelCase ) @rank_zero_only def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" ) __UpperCamelCase : Optional[Any] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results __UpperCamelCase : Any = Path(pl_module.hparams.output_dir ) if type_path == "test": __UpperCamelCase : Tuple = od / "test_results.txt" __UpperCamelCase : int = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__UpperCamelCase : Optional[int] = od / f"{type_path}_results/{trainer.global_step:05d}.txt" __UpperCamelCase : Optional[Any] = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=_UpperCAmelCase ) generations_file.parent.mkdir(exist_ok=_UpperCAmelCase ) with open(_UpperCAmelCase , "a+" ) as writer: for key in sorted(_UpperCAmelCase ): if key in ["log", "progress_bar", "preds"]: continue __UpperCamelCase : Optional[Any] = metrics[key] if isinstance(_UpperCAmelCase , torch.Tensor ): __UpperCamelCase : Optional[Any] = val.item() __UpperCamelCase : List[Any] = f"{key}: {val:.6f}\n" writer.write(_UpperCAmelCase ) if not save_generations: return if "preds" in metrics: __UpperCamelCase : Optional[int] = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_UpperCAmelCase ) @rank_zero_only def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> str: try: __UpperCamelCase : str = pl_module.model.model.num_parameters() except AttributeError: __UpperCamelCase : Any = pl_module.model.num_parameters() __UpperCamelCase : List[Any] = count_trainable_parameters(_UpperCAmelCase ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_UpperCAmelCase , _UpperCAmelCase , "test" ) @rank_zero_only def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
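# --- Illustrative usage (not part of the callbacks module above) -------------
# A hedged sketch of how the helpers above plug into a Trainer. The callback is
# referred to by its un-mangled name Seq2SeqLoggingCallback, and `model` stands
# for a LightningModule that logs `val_avg_rouge2` and exposes
# hparams.output_dir, which the logging callback above expects.
import pytorch_lightning as pl

checkpoint_callback = get_checkpoint_callback("outputs", "rouge2")
early_stopping_callback = get_early_stopping_callback("rouge2", patience=3)
trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_callback, early_stopping_callback])
# trainer.fit(model)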
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if placing ``n`` at (row, column) breaks no sudoku rule."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, if any."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
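# --- Illustrative check (not part of the solver above) -----------------------
# A quick sanity check for the backtracking solver: in a solved grid every row,
# column, and 3x3 box must contain the digits 1..9 exactly once.
def is_valid_solution(grid: Matrix) -> bool:
    digits = set(range(1, 10))
    rows_ok = all(set(row) == digits for row in grid)
    cols_ok = all({grid[r][c] for r in range(9)} == digits for c in range(9))
    boxes_ok = all(
        {grid[3 * br + i][3 * bc + j] for i in range(3) for j in range(3)} == digits
        for br in range(3)
        for bc in range(3)
    )
    return rows_ok and cols_ok and boxes_ok


# Example: solve a fresh copy so the module-level grid stays untouched.
# solved = sudoku([row[:] for row in initial_grid])
# assert solved is not None and is_valid_solution(solved)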
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _lowerCamelCase( lowercase__ ) -> List[Any]: '''simple docstring''' __lowercase= SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __lowercase= 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __lowercase= 4 __lowercase= 4_8 __lowercase= 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __lowercase= [6, 6, 6, 6] __lowercase= 6_0 __lowercase= [6, 6, 6, 6] __lowercase= 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __lowercase= 4 __lowercase= 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __lowercase= 1 __lowercase= 1 __lowercase= 1_2_6 __lowercase= 7 __lowercase= 255.0 __lowercase= '' return config def _lowerCamelCase( lowercase__ , lowercase__ ) -> int: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: __lowercase= name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowercase= name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: __lowercase= name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: __lowercase= name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: __lowercase= name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __lowercase= name.replace('attn' , 'attention.self' ) if "norm1" in name: __lowercase= name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __lowercase= name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __lowercase= name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __lowercase= name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: __lowercase= name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: __lowercase= name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: __lowercase= name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: __lowercase= name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: __lowercase= name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": __lowercase= 'layernorm.weight' if name == "norm.bias": __lowercase= 'layernorm.bias' if "conv_first" in name: __lowercase= name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __lowercase= name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __lowercase= name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: __lowercase= name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: __lowercase= name.replace('upsample.2' , 'upsample.convolution_1' ) __lowercase= 'upsample.' + name elif config.upsampler == "pixelshuffledirect": __lowercase= name.replace('upsample.0.weight' , 'upsample.conv.weight' ) __lowercase= name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: __lowercase= 'swin2sr.' 
+ name return name def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): __lowercase= orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: __lowercase= key.split('.' ) __lowercase= int(key_split[1] ) __lowercase= int(key_split[4] ) __lowercase= config.embed_dim if "weight" in key: __lowercase= val[:dim, :] __lowercase= val[dim : dim * 2, :] __lowercase= val[-dim:, :] else: __lowercase= val[:dim] __lowercase= val[dim : dim * 2] __lowercase= val[-dim:] pass else: __lowercase= val return orig_state_dict def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' __lowercase= get_config(_UpperCAmelCase ) __lowercase= SwinaSRForImageSuperResolution(_UpperCAmelCase ) model.eval() __lowercase= torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' ) __lowercase= convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) __lowercase, __lowercase= model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values __lowercase= 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' __lowercase= Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) __lowercase= SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __lowercase= 1_2_6 if 'Jpeg' in checkpoint_url else 2_5_6 __lowercase= Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __lowercase= transforms(_UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: __lowercase= pixel_values[:, 0, :, :].unsqueeze(1 ) __lowercase= model(_UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __lowercase= torch.Size([1, 3, 5_1_2, 5_1_2] ) __lowercase= torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __lowercase= torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) __lowercase= torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __lowercase= torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) __lowercase= torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __lowercase= torch.Size([1, 3, 5_1_2, 5_1_2] ) __lowercase= torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __lowercase= torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) __lowercase= torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1E-3 ) print('Looks ok!' 
) __lowercase= { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } __lowercase= url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') lowerCAmelCase = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
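# --- Illustrative usage (not part of the conversion script above) ------------
# The argparse block above is the whole CLI; a typical shell invocation (the
# script file name is an assumption, the three flags come from the parser):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path swin2SR-classical-sr-x2-64 \
#       --push_to_hub
#
# A converted checkpoint then loads through the regular transformers classes
# (using the real Swin2SR* names rather than the garbled "SwinaSR" spellings
# that appear above):
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
processor = Swin2SRImageProcessor()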
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
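# --- Illustrative usage (not part of the metric module above) ----------------
# Mirrors the docstring example: `load_metric` resolves this module when asked
# for "meteor", and tokenization happens inside _compute on NLTK >= 3.6.4.
import datasets

meteor = datasets.load_metric("meteor")
predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
print(round(meteor.compute(predictions=predictions, references=references)["meteor"], 4))  # 0.6944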
'''simple docstring''' import argparse import os import re lowerCAmelCase_ : Optional[int] = 'src/diffusers' # Pattern that looks at the indentation in a line. lowerCAmelCase_ : Any = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase_ : Tuple = re.compile(R'^\s*\"([^\"]+)\":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase_ : List[str] = re.compile(R'^\s*_import_structure\[\"([^\"]+)\"\]') # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase_ : Optional[Any] = re.compile(R'^\s*\"([^\"]+)\",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase_ : List[str] = re.compile(R'\[([^\]]+)\]') def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict: _a = _re_indent.search(_UpperCAmelCase ) return "" if search is None else search.groups()[0] def _lowerCamelCase ( lowercase : Optional[int] , lowercase : str="" , lowercase : Dict=None , lowercase : Union[str, Any]=None ) -> str: _a = 0 _a = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(_UpperCAmelCase ): index += 1 _a = ["\n".join(lines[:index] )] else: _a = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _a = [lines[index]] index += 1 while index < len(_UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(_UpperCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(_UpperCAmelCase ) ) if index < len(_UpperCAmelCase ) - 1: _a = [lines[index + 1]] index += 1 else: _a = [] else: blocks.append("\n".join(_UpperCAmelCase ) ) _a = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_UpperCAmelCase ) > 0: blocks.append("\n".join(_UpperCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_UpperCAmelCase ): blocks.append("\n".join(lines[index:] ) ) return blocks def _lowerCamelCase ( lowercase : Any ) -> Tuple: def _inner(lowercase : Optional[int] ): return key(_UpperCAmelCase ).lower().replace("_" , "" ) return _inner def _lowerCamelCase ( lowercase : str , lowercase : int=None ) -> Optional[Any]: # If no key is provided, we use a noop. def noop(lowercase : Optional[int] ): return x if key is None: _a = noop # Constants are all uppercase, they go first. _a = [obj for obj in objects if key(_UpperCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _a = [obj for obj in objects if key(_UpperCAmelCase )[0].isupper() and not key(_UpperCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. _a = [obj for obj in objects if not key(_UpperCAmelCase )[0].isupper()] _a = ignore_underscore(_UpperCAmelCase ) return sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]: # This inner function sort imports between [ ]. def _replace(lowercase : Union[str, Any] ): _a = match.groups()[0] if "," not in imports: return F'[{imports}]' _a = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _a = keys[:-1] return "[" + ", ".join([F'\"{k}\"' for k in sort_objects(_UpperCAmelCase )] ) + "]" _a = import_statement.split("\n" ) if len(_UpperCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _a = 2 if lines[1].strip() == "[" else 1 _a = [(i, _re_strip_line.search(_UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _a = sort_objects(_UpperCAmelCase , key=lambda lowercase : x[1] ) _a = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_UpperCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _a = _re_bracket_content.sub(_replace , lines[1] ) else: _a = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _a = keys[:-1] _a = get_indent(lines[1] ) + ", ".join([F'\"{k}\"' for k in sort_objects(_UpperCAmelCase )] ) return "\n".join(_UpperCAmelCase ) else: # Finally we have to deal with imports fitting on one line _a = _re_bracket_content.sub(_replace , _UpperCAmelCase ) return import_statement def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Tuple=True ) -> Optional[int]: with open(_UpperCAmelCase , "r" ) as f: _a = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _a = split_code_in_indented_blocks( _UpperCAmelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_UpperCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _a = main_blocks[block_idx] _a = block.split("\n" ) # Get to the start of the imports. _a = 0 while line_idx < len(_UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _a = len(_UpperCAmelCase ) else: line_idx += 1 if line_idx >= len(_UpperCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. _a = "\n".join(block_lines[line_idx:-1] ) _a = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _a = split_code_in_indented_blocks(_UpperCAmelCase , indent_level=_UpperCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend _a = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _a = [(pattern.search(_UpperCAmelCase ).groups()[0] if pattern.search(_UpperCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _a = [(i, key) for i, key in enumerate(_UpperCAmelCase ) if key is not None] _a = [x[0] for x in sorted(_UpperCAmelCase , key=lambda lowercase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_a = 0 _a = [] for i in range(len(_UpperCAmelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: _a = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_UpperCAmelCase ) count += 1 # And we put our main block back together with its first and last line. _a = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_UpperCAmelCase ): if check_only: return True else: print(F'Overwriting {file}.' ) with open(_UpperCAmelCase , "w" ) as f: f.write("\n".join(_UpperCAmelCase ) ) def _lowerCamelCase ( lowercase : List[Any]=True ) -> Tuple: _a = [] for root, _, files in os.walk(_UpperCAmelCase ): if "__init__.py" in files: _a = sort_imports(os.path.join(_UpperCAmelCase , "__init__.py" ) , check_only=_UpperCAmelCase ) if result: _a = [os.path.join(_UpperCAmelCase , "__init__.py" )] if len(_UpperCAmelCase ) > 0: raise ValueError(F'Would overwrite {len(_UpperCAmelCase )} files, run `make style`.' ) if __name__ == "__main__": lowerCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') lowerCAmelCase_ : Any = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
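# --- Illustrative usage (not part of the style script above) -----------------
# The script is normally driven by `make style` / `make quality`. Direct
# invocations look like this (the utils/custom_init_isort.py path is an
# assumption about where the file lives in the repo):
#
#   python utils/custom_init_isort.py               # rewrite badly sorted __init__.py files
#   python utils/custom_init_isort.py --check_only  # raise instead of rewriting
#
# The entry points can also be called programmatically, e.g. from another tool
# (sort_imports is the per-file function defined above under a mangled name):
#
#   sort_imports("src/diffusers/__init__.py", check_only=False)
#   sort_imports_in_all_inits(check_only=True)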
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
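# --- Illustrative usage (not part of the conversion script above) ------------
# `--checkpoint_path` accepts either a key of the _MODELS table above ("tiny",
# "base", ...), which routes through the SHA256-checked _download helper, or a
# local *.pt file. A typical run (the script file name is an assumption):
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path whisper-tiny
#
# after which the dump folder loads like any transformers checkpoint:
from transformers import WhisperForConditionalGeneration

model = WhisperForConditionalGeneration.from_pretrained("whisper-tiny")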
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {"vocab_file": "sentencepiece.bpe.model"} __magic_name__ = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", } } __magic_name__ = { "camembert-base": 512, } __magic_name__ = "▁" class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : List[str] = VOCAB_FILES_NAMES __lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP __lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : int = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=["<s>NOTUSED", "</s>NOTUSED"] , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): __SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowerCAmelCase__)) __SCREAMING_SNAKE_CASE = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __SCREAMING_SNAKE_CASE = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} __SCREAMING_SNAKE_CASE = len(self.fairseq_tokens_to_ids) __SCREAMING_SNAKE_CASE = len(self.sp_model) + len(self.fairseq_tokens_to_ids) __SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] __SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__)) + [1] return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1] def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def snake_case_ ( self): return len(self.fairseq_tokens_to_ids) + len(self.sp_model) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = 
{self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def snake_case_ ( self , lowerCAmelCase__): return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(lowerCAmelCase__) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = """""" __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(lowerCAmelCase__) return out_string.strip() def __getstate__( self): __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): if not os.path.isdir(lowerCAmelCase__): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return __SCREAMING_SNAKE_CASE = os.path.join( lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowerCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(lowerCAmelCase__ , """wb""") as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__) return (out_vocab_file,)
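# --- Illustrative usage (not part of the tokenizer module above) -------------
# A hedged sketch of the offset bookkeeping above: ids 0-3 come from the
# fairseq-style table ("<s>NOTUSED", "<pad>", "</s>NOTUSED", "<unk>") and every
# sentencepiece id is shifted by `fairseq_offset`. CamembertTokenizer is the
# un-mangled name this class is known by in transformers.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
encoding = tokenizer("J'aime le camembert !")
# Per build_inputs_with_special_tokens above, a single sequence is wrapped as
# <s> ... </s>:
assert encoding["input_ids"][0] == tokenizer.cls_token_id
assert encoding["input_ids"][-1] == tokenizer.sep_token_id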
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
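# --- Illustrative usage (not part of the builder module above) ---------------
# AudioFolder is exposed as the packaged "audiofolder" loader: a layout of
# data_dir/<label>/<clip>.wav (any extension in AUDIO_EXTENSIONS) yields an
# "audio" column plus, unless drop_labels is set, a "label" class column.
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="/path/to/clips", drop_labels=False)
print(ds["train"].features)  # {'audio': Audio(...), 'label': ClassLabel(...)}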
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __snake_case ( unittest.TestCase ): def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on SCREAMING_SNAKE_CASE__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] SCREAMING_SNAKE_CASE__ = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowercase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowercase ) ) SCREAMING_SNAKE_CASE__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], """image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , _lowercase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_lowercase , _lowercase ) def __a ( self : Union[str, Any] , **_lowercase : Optional[Any] ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def __a ( self : Optional[int] , **_lowercase : Tuple ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase ) def __a ( self : Union[str, Any] , **_lowercase : Any ): """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowercase ) def __a ( self : Dict ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __a ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) processor_slow.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase ) SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) processor_fast.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , 
tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowercase ) self.assertIsInstance(processor_fast.tokenizer , _lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowercase ) self.assertIsInstance(processor_fast.image_processor , _lowercase ) def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 ) SCREAMING_SNAKE_CASE__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowercase ) def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ = image_processor(_lowercase , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ = processor(images=_lowercase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) SCREAMING_SNAKE_CASE__ = """lower newer""" SCREAMING_SNAKE_CASE__ = processor(text=_lowercase ) SCREAMING_SNAKE_CASE__ = tokenizer(_lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) SCREAMING_SNAKE_CASE__ = """lower newer""" SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ = processor(text=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowercase ): processor() def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE__ = 
processor.batch_decode(_lowercase ) SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.get_image_processor() SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase ) SCREAMING_SNAKE_CASE__ = """lower newer""" SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ = processor(text=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import sys from collections import defaultdict class __lowerCAmelCase : def __init__( self : int) -> str: """simple docstring""" _UpperCAmelCase = [] def _lowerCamelCase ( self : Any , A : List[str]) -> int: """simple docstring""" return self.node_position[vertex] def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = pos def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , A) self.top_to_bottom(A , A , A , A) def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any: """simple docstring""" _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , A) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , A) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(A , 0) def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str: """simple docstring""" _UpperCAmelCase = len(A) // 2 - 1 for i in range(A , -1 , -1): self.top_to_bottom(A , A , len(A) , A) def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]: """simple docstring""" _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(A , 0 , len(A) , A) return temp def A ( _UpperCAmelCase : int ) -> Any: '''simple docstring''' _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(_UpperCAmelCase ) _UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(_UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCAmelCase ) heap.node_position.append(_UpperCAmelCase ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(_UpperCAmelCase , _UpperCAmelCase ) for _ in range(1 , len(_UpperCAmelCase ) ): _UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCAmelCase )] ): _UpperCAmelCase = distance heap.bottom_to_top( _UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = vertex return tree_edges if __name__ 
== "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load the diffusers base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load the LoRA weights from the .safetensors checkpoint
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update the weights in the diffusers model
    for key in state_dict:
        # it is suggested to print out the key; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # alpha is applied when the up/down pair is processed, so skip alpha
        # entries and keys already handled as part of a pair
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # walk the module tree to find the target layer; name parts that fail a
        # lookup are glued back together with "_" and retried
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update the weight: W <- W + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # mark both keys of the pair as handled
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    pipe = convert(
        args.base_model_path, args.checkpoint_path, args.lora_prefix_unet, args.lora_prefix_text_encoder, args.alpha
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
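# --- Added usage sketch (not part of the original script) ---
# A minimal illustration of the merge rule applied above, W <- W0 + alpha * (up @ down),
# on a single hypothetical nn.Linear layer. The layer size and the rank-4 factor
# shapes below are assumptions chosen only for this demo.
import torch
from torch import nn

_demo_layer = nn.Linear(8, 8)   # stand-in for one UNet / text-encoder linear layer
_lora_up = torch.randn(8, 4)    # "lora_up" factor: (out_features, rank)
_lora_down = torch.randn(4, 8)  # "lora_down" factor: (rank, in_features)
_alpha = 0.75
with torch.no_grad():
    _demo_layer.weight += _alpha * (_lora_up @ _lora_down)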
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) A_ :List[str] = '''\\n Text data.\n Second line of data.''' A_ :Any = '''file''' @pytest.fixture(scope='session' ) def A ( a_ ) -> Tuple: __UpperCamelCase : Tuple =tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd') __UpperCamelCase : int =bytes(_UpperCAmelCase ,'utf-8' ) with zstd.open(_UpperCAmelCase ,'wb' ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def A ( a_ ) -> Dict: with open(os.path.join(tmpfs.local_root_dir ,_UpperCAmelCase ) ,'w' ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize('compression_format' ,['gzip', 'xz', 'zstd'] ) def A ( a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> str: __UpperCamelCase : Dict ={'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path} __UpperCamelCase : Optional[int] =input_paths[compression_format] __UpperCamelCase : List[Any] =tmp_path / 'cache' __UpperCamelCase : int =DownloadConfig(cache_dir=_UpperCAmelCase ,extract_compressed_file=_UpperCAmelCase ) __UpperCamelCase : str =cached_path(_UpperCAmelCase ,download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: __UpperCamelCase : Tuple =f.read() with open(_UpperCAmelCase ) as f: __UpperCamelCase : List[Any] =f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted' ,[True, False] ) @pytest.mark.parametrize('default_cache_dir' ,[True, False] ) def A ( a_ ,a_ ,a_ ,a_ ,a_ ) -> Dict: __UpperCamelCase : Tuple ='custom_cache' __UpperCamelCase : List[Any] ='custom_extracted_dir' __UpperCamelCase : Dict =tmp_path / 'custom_extracted_path' if default_extracted: __UpperCamelCase : Tuple =('downloads' if default_cache_dir else custom_cache_dir, 'extracted') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' ,_UpperCAmelCase ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' ,str(_UpperCAmelCase ) ) __UpperCamelCase : Any =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __UpperCamelCase : Dict =xz_file __UpperCamelCase : List[str] =( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=_UpperCAmelCase ) ) __UpperCamelCase : int =cached_path(_UpperCAmelCase ,download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def A ( a_ ) -> Optional[int]: __UpperCamelCase : Dict =str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path __UpperCamelCase : Dict =str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def A ( a_ ) -> Optional[int]: __UpperCamelCase : List[str] =str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path __UpperCamelCase : Any ='./__missing_file__.txt' with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) def A ( a_ ) -> List[Any]: __UpperCamelCase : str =get_from_cache(F'tmp://{tmpfs_file}' ) with open(_UpperCAmelCase ) as f: __UpperCamelCase : Optional[int] =f.read() assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE' 
,_UpperCAmelCase ) def A ( ) -> Dict: with pytest.raises(_UpperCAmelCase ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' ,_UpperCAmelCase ) def A ( a_ ) -> Union[str, Any]: __UpperCamelCase : Optional[int] =tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_UpperCAmelCase ): http_get('https://huggingface.co' ,temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' ,_UpperCAmelCase ) def A ( a_ ) -> int: __UpperCamelCase : str =tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_UpperCAmelCase ): ftp_get('ftp://huggingface.co' ,temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' ,_UpperCAmelCase ) def A ( a_ ) -> int: __UpperCamelCase : Dict =tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_UpperCAmelCase ): fsspec_get('s3://huggingface.co' ,temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head('s3://huggingface.co' )
import math
import unittest


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time using the 6k +/- 1 optimization."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class TestIsPrime(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) // 9 is
    divisible by `divisor`, or 0 when no such k exists (i.e. when `divisor`
    shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor, coprime to 10, whose repunit index
    least_divisible_repunit(divisor) first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : str) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A , hypotheses=A , min_len=A , max_len=A) }
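# --- Added usage sketch (not part of the original metric file) ---
# The metric above delegates to NLTK's corpus_gleu, so the same score can be
# computed directly, assuming `nltk` is installed. The token lists here are
# made up for the demo.
from nltk.translate import gleu_score

_hypotheses = [["he", "read", "the", "book"]]
_list_of_references = [[["he", "was", "reading", "the", "book"]]]
_score = gleu_score.corpus_gleu(
    list_of_references=_list_of_references, hypotheses=_hypotheses, min_len=1, max_len=4
)
print(round(_score, 2))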
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json', 'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json', 'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json', 'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json', 'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json', 'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json', 'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json', 'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json', 'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json', 'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json', } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="rwkv" UpperCamelCase ={"max_position_embeddings": "context_length"} def __init__( self , UpperCamelCase_=5_02_77 , UpperCamelCase_=10_24 , UpperCamelCase_=40_96 , UpperCamelCase_=32 , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=1E-5 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=6 , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ) -> Optional[int]: __lowercase : str = vocab_size __lowercase : Union[str, Any] = context_length __lowercase : List[str] = hidden_size __lowercase : int = num_hidden_layers __lowercase : Tuple = attention_hidden_size if attention_hidden_size is not None else hidden_size __lowercase : List[Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size __lowercase : Dict = layer_norm_epsilon __lowercase : int = rescale_every __lowercase : str = use_cache __lowercase : Tuple = bos_token_id __lowercase : str = eos_token_id super().__init__( tie_word_embeddings=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers, with dummy fallbacks when the optional
# dependencies are missing.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwargs key is kept for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
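# --- Added usage sketch (not part of the original file; shown as a comment
# because this module uses package-relative imports and is not standalone) ---
# `rope_scaling`, when provided, must be a two-field dict with `type` in
# {"linear", "dynamic"} and a float `factor` > 1.0, as enforced by
# `_rope_scaling_validation` above:
#
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})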
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=False): try: lowercase__ : Optional[int] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : List[str] = default else: # KEY is set, convert it to True or False. try: lowercase__ : Optional[Any] = strtobool(_UpperCAmelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skip("Test was skipped")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Dict): return unittest.skipUnless(_run_slow_tests , "test is slow")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Dict): return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Optional[Any]): return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Optional[int]): return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : str): return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Optional[Any]): return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Tuple): return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Tuple): return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Any): return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : List[Any]): return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Optional[int]): return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Any=None 
, _lowerCamelCase : List[Any]=None): if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase) return unittest.skipUnless(is_torch_version(">=" , _UpperCAmelCase) , f'''test requires torch version >= {version}''')(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_UpperCAmelCase) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_UpperCAmelCase) UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase_ ( _lowerCamelCase : List[str]): return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_UpperCAmelCase) class snake_case_ ( unittest.TestCase ): __A : int = True @classmethod def __UpperCamelCase ( cls : List[Any] ) -> Tuple: lowercase__ : str = tempfile.mkdtemp() @classmethod def __UpperCamelCase ( cls : Union[str, Any] ) -> str: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowercase_ ) class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Dict ) -> Tuple: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[int] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> Tuple: lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase_ ( _lowerCamelCase : List[Any]): lowercase__ : Any = AcceleratorState() lowercase__ : Any = tensor[None].clone().to(state.device) lowercase__ : int = gather(_UpperCAmelCase).cpu() lowercase__ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0]): if not torch.equal(tensors[i] , _UpperCAmelCase): return False return True class snake_case_ : def __init__( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ) -> Optional[int]: lowercase__ : List[Any] = returncode lowercase__ : List[str] = stdout lowercase__ : int = stderr async def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Optional[int]): while True: lowercase__ : Tuple = await stream.readline() if line: callback(_UpperCAmelCase) else: break async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : str=None , _lowerCamelCase : str=None , _lowerCamelCase : Dict=False , _lowerCamelCase : Union[str, Any]=False): if echo: print("\nRunning: " , " ".join(_UpperCAmelCase)) lowercase__ : List[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following 
code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : Optional[Any] = [] lowercase__ : Optional[int] = [] def tee(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : str=""): lowercase__ : Tuple = line.decode("utf-8").rstrip() sink.append(_UpperCAmelCase) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label="stdout:"))), asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label="stderr:"))), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase) def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict=None , _lowerCamelCase : str=None , _lowerCamelCase : str=180 , _lowerCamelCase : List[Any]=False , _lowerCamelCase : List[Any]=True): lowercase__ : str = asyncio.get_event_loop() lowercase__ : Optional[Any] = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase)) lowercase__ : Tuple = " ".join(_UpperCAmelCase) if result.returncode > 0: lowercase__ : List[str] = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') return result class snake_case_ ( __A ): pass def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : str=False): try: lowercase__ : Any = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT) if return_stdout: if hasattr(_UpperCAmelCase , "decode"): lowercase__ : Union[str, Any] = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(_UpperCAmelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform Luhn-algorithm validation on a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number, printing the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } _lowerCAmelCase = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } _lowerCAmelCase = {'''facebook/blenderbot_small-90M''': 512} def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Union[str, Any] = set() __UpperCamelCase : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCamelCase : Optional[Any] = char __UpperCamelCase : List[str] = set(_UpperCAmelCase ) return pairs class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = VOCAB_FILES_NAMES A = PRETRAINED_VOCAB_FILES_MAP A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A = ["input_ids", "attention_mask"] def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="__start__" , _UpperCAmelCase="__end__" , _UpperCAmelCase="__unk__" , _UpperCAmelCase="__null__" , **_UpperCAmelCase , ) -> Optional[Any]: super().__init__(unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , **_UpperCAmelCase ) with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle: __UpperCamelCase : Any = json.load(_UpperCAmelCase ) __UpperCamelCase : List[Any] = {v: k for k, v in self.encoder.items()} with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle: __UpperCamelCase : Any = merges_handle.read().split("\n" )[1:-1] __UpperCamelCase : List[str] = [tuple(merge.split() ) for merge in merges] __UpperCamelCase : List[Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __UpperCamelCase : Tuple = {} @property def a_ (self ) -> int: return len(self.encoder ) def a_ (self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def a_ (self , _UpperCAmelCase ) -> str: if token in self.cache: return self.cache[token] __UpperCamelCase : str = re.sub("([.,!?()])" , R" \1" , _UpperCAmelCase ) __UpperCamelCase : Dict = re.sub("(\')" , R" \1 " , _UpperCAmelCase ) __UpperCamelCase : Any = re.sub(R"\s{2,}" , " " , _UpperCAmelCase ) if "\n" in token: __UpperCamelCase : int = token.replace("\n" , " __newln__" ) __UpperCamelCase : Dict = token.split(" " ) __UpperCamelCase : Optional[Any] = [] for token in tokens: if not len(_UpperCAmelCase ): continue __UpperCamelCase : List[str] = token.lower() __UpperCamelCase : int = tuple(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) __UpperCamelCase : Tuple = get_pairs(_UpperCAmelCase ) if not pairs: words.append(_UpperCAmelCase ) continue while True: __UpperCamelCase : List[Any] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __UpperCamelCase , __UpperCamelCase : Any = bigram __UpperCamelCase : str = [] __UpperCamelCase : Optional[int] = 0 while i < 
len(_UpperCAmelCase ): try: __UpperCamelCase : Any = word.index(_UpperCAmelCase , _UpperCAmelCase ) new_word.extend(word[i:j] ) __UpperCamelCase : Optional[Any] = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCamelCase : int = tuple(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = new_word if len(_UpperCAmelCase ) == 1: break else: __UpperCamelCase : str = get_pairs(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = "@@ ".join(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = word[:-4] __UpperCamelCase : List[str] = word words.append(_UpperCAmelCase ) return " ".join(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : Dict = [] __UpperCamelCase : Optional[Any] = re.findall(R"\S+\n?" , _UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(" " ) ) ) return split_tokens def a_ (self , _UpperCAmelCase ) -> int: __UpperCamelCase : Optional[int] = token.lower() return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def a_ (self , _UpperCAmelCase ) -> str: return self.decoder.get(_UpperCAmelCase , self.unk_token ) def a_ (self , _UpperCAmelCase ) -> str: __UpperCamelCase : Optional[int] = " ".join(_UpperCAmelCase ).replace("@@ " , "" ).strip() return out_string def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(_UpperCAmelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return __UpperCamelCase : List[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __UpperCamelCase : int = os.path.join( _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" ) __UpperCamelCase : Optional[int] = 0 with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) __UpperCamelCase : Optional[int] = token_index writer.write(" ".join(_UpperCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the digit string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """
    Apply Coulomb's law, F = k * q1 * q2 / d^2, and solve for whichever of the
    four arguments is passed as 0, returning it in a one-entry dict.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
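# --- Added usage sketch (not part of the original file) ---
# Solving for the missing force from two charges and a separation (SI units):
# 8.988e9 * |3 * 5| / 2000**2 == 33705.0
print(coulombs_law(force=0, charge1=3, charge2=5, distance=2000))  # {'force': 33705.0}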
295
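# Quick usage sketch (values are illustrative): pass exactly one argument as 0
# and the function solves for it.
# Force between two 1 C charges 1 m apart: F = k * q1 * q2 / d^2.
print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))
# {'force': 8988000000.0}

# Recover the distance that produces that same force.
print(coulombs_law(force=8.988e9, charge1=1, charge2=1, distance=0))
# {'distance': 1.0}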
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting
    followed by back substitution."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial through the points (1, y_points[0]), (2, y_points[1]), ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) = 1 - n + n^2 - ... + n^10."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum fitting polynomials (Project Euler 101)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"""{solution() = }""")
339
0
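# Two small checks of the pieces above (toy data):
# solve() on the 2x2 system x + y = 3, 2x - y = 0 gives x = 1, y = 2, and
# interpolate() through (1, 1), (2, 4), (3, 9) recovers f(x) = x^2.
print(solve([[1, 1], [2, -1]], [[3], [0]]))  # [[1.0], [2.0]]

f = interpolate([1, 4, 9])
print([f(x) for x in range(1, 6)])  # [1, 4, 9, 16, 25]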
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    fairseq_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = fairseq_checkpoint["args"]
    state_dict = fairseq_checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
63
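# make_linear_from_emb() above is the usual weight-tying trick: the LM head
# reuses the decoder's embedding matrix. A standalone sketch with toy sizes
# (an illustration, not part of the conversion script):
import torch
from torch import nn

emb = nn.Embedding(10, 4)               # vocab_size=10, hidden_size=4
lm_head = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) matches emb.weight
lm_head.weight.data = emb.weight.data   # share the same tensor storage

logits = lm_head(torch.randn(1, 4))     # one score per vocabulary entry
assert logits.shape == (1, 10)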
from __future__ import annotations


def all_unique(elements: list[int]) -> bool:
    """Return True if every element of the list is distinct."""
    return len(set(elements)) == len(elements)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
0
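# Quick checks; note the set-based test only works for hashable elements:
assert all_unique([1, 2, 3])
assert not all_unique([1, 2, 2])
# For unhashable items (e.g. lists), convert to tuples first:
rows = [[1, 2], [3, 4], [1, 2]]
assert len({tuple(r) for r in rows}) != len(rows)  # duplicates present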
"""simple docstring""" from datetime import datetime as dt import os from github import Github __magic_name__ = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def _lowerCAmelCase ( ): __SCREAMING_SNAKE_CASE = Github(os.environ["""GITHUB_TOKEN"""] ) __SCREAMING_SNAKE_CASE = g.get_repo("""huggingface/transformers""" ) __SCREAMING_SNAKE_CASE = repo.get_issues(state="""open""" ) for issue in open_issues: __SCREAMING_SNAKE_CASE = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCamelCase_ : i.created_at , reverse=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="""closed""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
100
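# The two inactivity windows are plain datetime arithmetic; the same predicate
# on synthetic timestamps (dates are made up):
from datetime import datetime, timedelta

now = datetime(2023, 6, 1)
updated_at = now - timedelta(days=25)  # last activity 25 days ago
created_at = now - timedelta(days=90)  # issue opened 90 days ago

needs_stale_comment = (now - updated_at).days > 23 and (now - created_at).days >= 30
print(needs_stale_comment)  # True: quiet for more than 23 days and older than 30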
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to its minimal Roman numeral representation."""
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"""{solution() = }""")
339
0
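# Round-trip check showing the compression Project Euler 89 measures
# (the input is a deliberately non-minimal form of 49):
num = parse_roman_numerals("XXXXVIIII")
assert num == 49
assert generate_roman_numerals(num) == "XLIX"  # canonical form, 5 chars shorter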
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : int = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class __snake_case ( lowerCamelCase_ ): lowerCAmelCase_ = "t5" lowerCAmelCase_ = ["past_key_values"] lowerCAmelCase_ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Optional[Any] , _lowercase : Union[str, Any]=3_21_28 , _lowercase : Optional[Any]=5_12 , _lowercase : Optional[Any]=64 , _lowercase : Union[str, Any]=20_48 , _lowercase : Tuple=6 , _lowercase : Optional[int]=None , _lowercase : List[Any]=8 , _lowercase : Dict=32 , _lowercase : str=1_28 , _lowercase : Tuple=0.1 , _lowercase : List[str]=1E-6 , _lowercase : str=1.0 , _lowercase : Optional[Any]="relu" , _lowercase : Tuple=True , _lowercase : Optional[int]=True , _lowercase : Optional[Any]=0 , _lowercase : Optional[Any]=1 , **_lowercase : Dict , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = d_model SCREAMING_SNAKE_CASE__ = d_kv SCREAMING_SNAKE_CASE__ = d_ff SCREAMING_SNAKE_CASE__ = num_layers SCREAMING_SNAKE_CASE__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE__ = num_heads SCREAMING_SNAKE_CASE__ = relative_attention_num_buckets SCREAMING_SNAKE_CASE__ = relative_attention_max_distance SCREAMING_SNAKE_CASE__ = dropout_rate SCREAMING_SNAKE_CASE__ = layer_norm_epsilon SCREAMING_SNAKE_CASE__ = initializer_factor SCREAMING_SNAKE_CASE__ = feed_forward_proj SCREAMING_SNAKE_CASE__ = use_cache SCREAMING_SNAKE_CASE__ = self.feed_forward_proj.split("""-""" ) SCREAMING_SNAKE_CASE__ = act_info[-1] SCREAMING_SNAKE_CASE__ = act_info[0] == """gated""" if len(_lowercase ) > 1 and act_info[0] != "gated" or len(_lowercase ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
""" """\'gated-gelu\' or \'relu\'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE__ = """gelu_new""" super().__init__( pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , **_lowercase , ) class __snake_case ( lowerCamelCase_ ): @property def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: SCREAMING_SNAKE_CASE__ = """past_encoder_sequence + sequence""" SCREAMING_SNAKE_CASE__ = {0: """batch"""} SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_lowercase , direction="""inputs""" ) return common_inputs @property def __a ( self : Any ): """simple docstring""" return 13
219
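# The feed_forward_proj parsing in the config above is a plain split on "-";
# the same logic in isolation (values are illustrative):
feed_forward_proj = "gated-gelu"
act_info = feed_forward_proj.split("-")
dense_act_fn = act_info[-1]            # "gelu"
is_gated_act = act_info[0] == "gated"  # True
# Specs like "gated-gelu-extra" or "swish-gated" fail the validity check:
valid = not (len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2)
print(dense_act_fn, is_gated_act, valid)  # gelu True True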
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar result page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
0
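# The scraper depends on Google Scholar's live markup, so the parsing step is
# easier to see on a static snippet (the HTML below is a made-up stand-in for
# the gs_ri / gs_fl structure the code expects):
from bs4 import BeautifulSoup

html = """
<div class="gs_ri">
  <div class="gs_fl">
    <a href="#">Save</a><a href="#">Cite</a><a href="#">Cited by 128</a>
  </div>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
div = soup.find("div", attrs={"class": "gs_ri"})
anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
print(anchors[2].get_text())  # Cited by 128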
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowercase__ ( _snake_case ): '''simple docstring''' A_ : List[Any] = """char""" A_ : List[str] = """bpe""" A_ : Tuple = """wp""" UpperCAmelCase_ : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowercase__ ( _snake_case ): '''simple docstring''' A_ : Tuple = ["""image_processor""", """char_tokenizer"""] A_ : Optional[int] = """ViTImageProcessor""" A_ : int = """MgpstrTokenizer""" def __init__( self , __snake_case=None , __snake_case=None , **__snake_case ): _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __snake_case , ) _SCREAMING_SNAKE_CASE : Any = kwargs.pop("""feature_extractor""" ) _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) _SCREAMING_SNAKE_CASE : int = tokenizer _SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("""gpt2""" ) _SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(__snake_case , __snake_case ) def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ): if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _SCREAMING_SNAKE_CASE : Any = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case ) if text is not None: _SCREAMING_SNAKE_CASE : Dict = self.char_tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case ) if text is None: return inputs elif images is None: return encodings else: _SCREAMING_SNAKE_CASE : Optional[Any] = encodings["""input_ids"""] return inputs def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = sequences _SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self._decode_helper(__snake_case , """char""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(__snake_case , """bpe""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._decode_helper(__snake_case , """wp""" ) _SCREAMING_SNAKE_CASE : Any = [] _SCREAMING_SNAKE_CASE : int = [] for i in range(__snake_case ): _SCREAMING_SNAKE_CASE : int = [char_scores[i], bpe_scores[i], wp_scores[i]] _SCREAMING_SNAKE_CASE : int = [char_strs[i], bpe_strs[i], wp_strs[i]] _SCREAMING_SNAKE_CASE : List[Any] = scores.index(max(__snake_case ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _SCREAMING_SNAKE_CASE : List[str] = {} _SCREAMING_SNAKE_CASE : int = final_strs _SCREAMING_SNAKE_CASE : Dict = final_scores _SCREAMING_SNAKE_CASE : List[str] = char_strs _SCREAMING_SNAKE_CASE : Optional[int] = bpe_strs _SCREAMING_SNAKE_CASE : str = wp_strs return out def UpperCAmelCase_ ( self , __snake_case , __snake_case ): if format == DecodeType.CHARACTER: _SCREAMING_SNAKE_CASE : Any = self.char_decode 
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = """[s]""" elif format == DecodeType.BPE: _SCREAMING_SNAKE_CASE : List[Any] = self.bpe_decode _SCREAMING_SNAKE_CASE : Any = 2 _SCREAMING_SNAKE_CASE : str = """#""" elif format == DecodeType.WORDPIECE: _SCREAMING_SNAKE_CASE : int = self.wp_decode _SCREAMING_SNAKE_CASE : List[Any] = 102 _SCREAMING_SNAKE_CASE : Tuple = """[SEP]""" else: raise ValueError(f"""Format {format} is not supported.""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = [], [] _SCREAMING_SNAKE_CASE : Optional[int] = pred_logits.size(0 ) _SCREAMING_SNAKE_CASE : List[str] = pred_logits.size(1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = pred_logits.topk(1 , dim=-1 , largest=__snake_case , sorted=__snake_case ) _SCREAMING_SNAKE_CASE : Dict = preds_index.view(-1 , __snake_case )[:, 1:] _SCREAMING_SNAKE_CASE : Optional[Any] = decoder(__snake_case ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.softmax(__snake_case , dim=2 ).max(dim=2 ) _SCREAMING_SNAKE_CASE : Optional[Any] = preds_max_prob[:, 1:] for index in range(__snake_case ): _SCREAMING_SNAKE_CASE : Dict = preds_str[index].find(__snake_case ) _SCREAMING_SNAKE_CASE : int = preds_str[index][:pred_eos] _SCREAMING_SNAKE_CASE : Optional[Any] = preds_index[index].cpu().tolist() _SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(__snake_case ) if eos_token in pred_index else -1 _SCREAMING_SNAKE_CASE : Dict = preds_max_prob[index][: pred_eos_index + 1] _SCREAMING_SNAKE_CASE : Optional[Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__snake_case ) conf_scores.append(__snake_case ) return dec_strs, conf_scores def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : Tuple = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__snake_case )] return decode_strs def UpperCAmelCase_ ( self , __snake_case ): return self.bpe_tokenizer.batch_decode(__snake_case ) def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__snake_case )] return decode_strs
200
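# The processor's batch_decode fuses the three decoding heads by keeping, per
# sample, whichever head reports the highest confidence. The selection logic in
# isolation (scores and strings below are toy values):
char_strs, bpe_strs, wp_strs = ["hello"], ["he llo"], ["hel lo"]
char_scores, bpe_scores, wp_scores = [0.91], [0.85], [0.88]

final_strs, final_scores = [], []
for i in range(len(char_strs)):
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    best = scores.index(max(scores))
    final_strs.append(strs[best])
    final_scores.append(scores[best])

print(final_strs, final_scores)  # ['hello'] [0.91]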
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_attention_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_choices def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase = None if self.use_attention_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = True UpperCamelCase = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" _UpperCAmelCase = 
FlaxRoFormerModelTester(self) @slow def _lowerCamelCase ( self : List[Any]) -> Dict: """simple docstring""" for model_class_name in self.all_model_classes: _UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A) _UpperCAmelCase = model(np.ones((1, 1))) self.assertIsNotNone(A) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base') _UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase = model(A)[0] _UpperCAmelCase = 5_00_00 _UpperCAmelCase = (1, 6, vocab_size) self.assertEqual(output.shape , A) _UpperCAmelCase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
339
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __A ( a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Any =ShapEImgaImgPipeline UpperCamelCase__ : str =["""image"""] UpperCamelCase__ : Optional[Any] =["""image"""] UpperCamelCase__ : List[str] =[ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] UpperCamelCase__ : Any =False @property def __lowercase ( self ): """simple docstring""" return 32 @property def __lowercase ( self ): """simple docstring""" return 32 @property def __lowercase ( self ): """simple docstring""" return self.time_input_dim * 4 @property def __lowercase ( self ): """simple docstring""" return 8 @property def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : int =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __UpperCamelCase : str =CLIPVisionModel(lowerCamelCase__ ) return model @property def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =CLIPImageProcessor( crop_size=224 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor @property def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : List[Any] ={ 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __UpperCamelCase : Any =PriorTransformer(**lowerCamelCase__ ) return model @property def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : List[Any] ={ 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __UpperCamelCase : Dict =ShapERenderer(**lowerCamelCase__ ) return model def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =self.dummy_prior __UpperCamelCase : int =self.dummy_image_encoder __UpperCamelCase : Union[str, Any] =self.dummy_image_processor __UpperCamelCase : Optional[int] =self.dummy_renderer __UpperCamelCase : str =HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , ) 
__UpperCamelCase : Optional[int] ={ 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : List[str] =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): __UpperCamelCase : Optional[int] =torch.manual_seed(lowerCamelCase__ ) else: __UpperCamelCase : int =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __UpperCamelCase : List[Any] ={ 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Dict ='cpu' __UpperCamelCase : Tuple =self.get_dummy_components() __UpperCamelCase : List[str] =self.pipeline_class(**lowerCamelCase__ ) __UpperCamelCase : Optional[int] =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[Any] =pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) __UpperCamelCase : Optional[int] =output.images[0] __UpperCamelCase : Dict =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCamelCase : Union[str, Any] =np.array( [ 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =torch_device == 'cpu' __UpperCamelCase : Union[str, Any] =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =self.get_dummy_components() __UpperCamelCase : List[Any] =self.pipeline_class(**lowerCamelCase__ ) __UpperCamelCase : int =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[Any] =1 __UpperCamelCase : Any =2 __UpperCamelCase : Optional[Any] =self.get_dummy_inputs(lowerCamelCase__ ) for key in inputs.keys(): if key in self.batch_params: __UpperCamelCase : List[str] =batch_size * [inputs[key]] __UpperCamelCase : Optional[Any] =pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) __UpperCamelCase : List[Any] =load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) __UpperCamelCase : List[str] =ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) __UpperCamelCase : List[str] =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[str] =torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __UpperCamelCase : Union[str, Any] =pipe( 
lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
71
UpperCAmelCase__ = {} def A ( _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int: '''simple docstring''' # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _UpperCAmelCase = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _UpperCAmelCase = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , 0 ) _UpperCAmelCase = state_late + state_absent + state_ontime _UpperCAmelCase = prizestrings return prizestrings def A ( _UpperCAmelCase : int = 30 ) -> int: '''simple docstring''' return _calculate(_UpperCAmelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
339
0
'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class A__ : def __init__( self : Dict , _a : Optional[Any] , _a : List[str]=99 , _a : int=13 , _a : str=7 , _a : Optional[Any]=9 , _a : List[str]=True , _a : List[str]=True , _a : List[str]=False , _a : Tuple=32 , _a : Optional[int]=5 , _a : Any=4 , _a : Any=37 , _a : Tuple=8 , _a : Optional[int]=0.1 , _a : Union[str, Any]=0.0_02 , _a : Dict=1 , _a : int=0 , _a : Optional[int]=0 , _a : Any=None , _a : Tuple=None , ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =encoder_seq_length _SCREAMING_SNAKE_CASE =decoder_seq_length # For common tests _SCREAMING_SNAKE_CASE =self.decoder_seq_length _SCREAMING_SNAKE_CASE =is_training _SCREAMING_SNAKE_CASE =use_attention_mask _SCREAMING_SNAKE_CASE =use_labels _SCREAMING_SNAKE_CASE =vocab_size _SCREAMING_SNAKE_CASE =hidden_size _SCREAMING_SNAKE_CASE =num_hidden_layers _SCREAMING_SNAKE_CASE =num_attention_heads _SCREAMING_SNAKE_CASE =d_ff _SCREAMING_SNAKE_CASE =relative_attention_num_buckets _SCREAMING_SNAKE_CASE =dropout_rate _SCREAMING_SNAKE_CASE =initializer_factor _SCREAMING_SNAKE_CASE =eos_token_id _SCREAMING_SNAKE_CASE =pad_token_id _SCREAMING_SNAKE_CASE =decoder_start_token_id _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =decoder_layers def A ( self : int ) -> Optional[int]: '''simple docstring''' return TaConfig.from_pretrained('google/umt5-base' ) def A ( self : Tuple , _a : Optional[Any] , _a : Union[str, Any] , _a : int , _a : int=None , _a : int=None , _a : Any=None , _a : Any=None , _a : str=None , ) -> str: '''simple docstring''' if attention_mask is None: _SCREAMING_SNAKE_CASE =input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _SCREAMING_SNAKE_CASE =decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _SCREAMING_SNAKE_CASE =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a ) if decoder_head_mask is None: _SCREAMING_SNAKE_CASE =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a ) if cross_attn_head_mask is None: _SCREAMING_SNAKE_CASE =torch.ones( config.num_decoder_layers , config.num_attention_heads , device=_a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def A ( self : int ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing 
if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _SCREAMING_SNAKE_CASE =input_ids.clamp(self.pad_token_id + 1 ) _SCREAMING_SNAKE_CASE =decoder_input_ids.clamp(self.pad_token_id + 1 ) _SCREAMING_SNAKE_CASE =self.get_config() _SCREAMING_SNAKE_CASE =config.num_attention_heads _SCREAMING_SNAKE_CASE =self.prepare_inputs_dict(_a , _a , _a ) return config, input_dict def A ( self : Tuple ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() return config, inputs_dict def A ( self : Optional[int] ) -> int: '''simple docstring''' return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def A ( self : Dict ) -> Any: '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def A ( self : str , _a : Dict , _a : str , _a : Dict , _a : int , _a : Any , _a : List[str] , ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =UMTaModel(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model( input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , ) _SCREAMING_SNAKE_CASE =model(input_ids=_a , decoder_input_ids=_a ) _SCREAMING_SNAKE_CASE =result.last_hidden_state _SCREAMING_SNAKE_CASE =result.past_key_values _SCREAMING_SNAKE_CASE =result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_a ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def A ( self : Any , _a : Optional[Any] , _a : int , _a : Dict , _a : List[Any] , _a : Any , _a : Optional[Any] , ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =UMTaModel(config=_a ).get_decoder().to(_a ).eval() # first forward pass _SCREAMING_SNAKE_CASE =model(_a , use_cache=_a ) _SCREAMING_SNAKE_CASE =model(_a ) _SCREAMING_SNAKE_CASE =model(_a , use_cache=_a ) self.parent.assertTrue(len(_a ) == len(_a ) ) self.parent.assertTrue(len(_a ) == len(_a ) + 1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =outputs.to_tuple() # create hypothetical next token and extent to 
next_input_ids _SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _SCREAMING_SNAKE_CASE =torch.cat([input_ids, next_tokens] , dim=-1 ) _SCREAMING_SNAKE_CASE =model(_a )['last_hidden_state'] _SCREAMING_SNAKE_CASE =model(_a , past_key_values=_a )['last_hidden_state'] # select random slice _SCREAMING_SNAKE_CASE =ids_tensor((1,) , output_from_past.shape[-1] ).item() _SCREAMING_SNAKE_CASE =output_from_no_past[:, -1, random_slice_idx].detach() _SCREAMING_SNAKE_CASE =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) ) def A ( self : str , _a : List[Any] , _a : List[Any] , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =UMTaModel(config=_a ).to(_a ).half().eval() _SCREAMING_SNAKE_CASE =model(**_a )['last_hidden_state'] self.parent.assertFalse(torch.isnan(_a ).any().item() ) @require_torch class A__ ( A__ , A__ , A__ , unittest.TestCase ): A__ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A__ = (UMTaForConditionalGeneration,) if is_torch_available() else () A__ = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) A__ = True A__ = False A__ = False A__ = True A__ = True # The small UMT5 model needs higher percentages for CPU/MP tests A__ = [0.8, 0.9] def A ( self : List[str] ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def A ( self : Tuple ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE =UMTaModel(config_and_inputs[0] ).to(_a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=_a , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def A ( self : Dict ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_a ) def A ( self : Tuple ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =['encoder_attentions', 'decoder_attentions', 'cross_attentions'] _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE =config_and_inputs[0] _SCREAMING_SNAKE_CASE =UMTaForConditionalGeneration(_a ).eval() model.to(_a ) _SCREAMING_SNAKE_CASE ={ 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_a ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ), } for attn_name, (name, mask) in zip(_a , head_masking.items() ): _SCREAMING_SNAKE_CASE ={name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _SCREAMING_SNAKE_CASE =torch.ones( config.num_decoder_layers , config.num_heads , device=_a ) _SCREAMING_SNAKE_CASE =model.generate( 
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , ) # We check the state of decoder_attentions and cross_attentions just from the last step _SCREAMING_SNAKE_CASE =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' ) def A ( self : List[Any] ) -> Tuple: '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def A ( self : Any ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=_a ).to(_a ) _SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=_a , legacy=_a ) _SCREAMING_SNAKE_CASE =[ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] _SCREAMING_SNAKE_CASE =tokenizer(_a , return_tensors='pt' , padding=_a ).input_ids # fmt: off _SCREAMING_SNAKE_CASE =torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(_a , _a ) _SCREAMING_SNAKE_CASE =model.generate(input_ids.to(_a ) ) _SCREAMING_SNAKE_CASE =[ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] _SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a ) self.assertEqual(_a , _a )
47
import os import sys import unittest UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers") class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = find_backend(' if not is_torch_available():') self.assertEqual(A , 'torch') # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):') self.assertEqual(A , 'torch_and_transformers') # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):') self.assertEqual(A , 'torch_and_transformers_and_onnx') def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , A) self.assertIn('torch_and_transformers' , A) self.assertIn('flax_and_transformers' , A) self.assertIn('torch_and_transformers_and_onnx' , A) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch']) self.assertIn('FlaxUNet2DConditionModel' , objects['flax']) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers']) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers']) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy']) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx']) def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'') self.assertEqual(A , '\nCONSTANT = None\n') _UpperCAmelCase = create_dummy_object('function' , '\'torch\'') self.assertEqual( A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n') _UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'') self.assertEqual(A , A) def _lowerCamelCase ( self : Dict) -> int: """simple docstring""" _UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']}) self.assertEqual(dummy_files['torch'] , A)
339
0
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="data2vec-audio" def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_="gelu" , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_=False , UpperCamelCase_=16 , UpperCamelCase_=19 , UpperCamelCase_=5 , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_="sum" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCamelCase_=(5, 3, 3, 1, 1) , UpperCamelCase_=(1, 2, 3, 1, 1) , UpperCamelCase_=5_12 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=False , UpperCamelCase_=3 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Tuple: super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ ) __lowercase : Union[str, Any] = hidden_size __lowercase : Optional[Any] = feat_extract_activation __lowercase : Any = list(UpperCamelCase_ ) __lowercase : List[str] = list(UpperCamelCase_ ) __lowercase : Optional[int] = list(UpperCamelCase_ ) __lowercase : Dict = conv_bias __lowercase : Optional[int] = num_conv_pos_embeddings __lowercase : Any = num_conv_pos_embedding_groups __lowercase : str = conv_pos_kernel_size __lowercase : Dict = len(self.conv_dim ) __lowercase : str = num_hidden_layers __lowercase : List[str] = intermediate_size __lowercase : Tuple = hidden_act __lowercase : str = num_attention_heads __lowercase : Dict = hidden_dropout __lowercase : Optional[Any] = attention_dropout __lowercase : int = activation_dropout __lowercase : List[str] = feat_proj_dropout __lowercase : Union[str, Any] = final_dropout __lowercase : List[str] = layerdrop __lowercase : Tuple = layer_norm_eps __lowercase : str = initializer_range __lowercase : Optional[int] = vocab_size __lowercase : Union[str, Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase : int = mask_time_prob __lowercase : int = mask_time_length __lowercase : Tuple = mask_time_min_masks __lowercase : List[Any] = mask_feature_prob __lowercase : Union[str, Any] = mask_feature_length __lowercase : List[str] = mask_feature_min_masks # ctc loss __lowercase : Optional[Any] = ctc_loss_reduction __lowercase : Any = ctc_zero_infinity # adapter __lowercase : int = add_adapter __lowercase : int = adapter_kernel_size __lowercase : Union[str, Any] = adapter_stride __lowercase : List[str] = num_adapter_layers __lowercase : Optional[int] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowercase : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowercase : Union[str, Any] = list(UpperCamelCase_ ) __lowercase : int = list(UpperCamelCase_ ) __lowercase : str = list(UpperCamelCase_ ) __lowercase : Optional[int] = xvector_output_dim @property def _lowerCamelCase ( self ) -> List[Any]: return math.prod(self.conv_stride )
249
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") UpperCAmelCase__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) UpperCamelCase = field( default=1_0_2_4 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def _lowerCamelCase ( self : str) -> List[Any]: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.') else: _UpperCAmelCase = self.train_file.split('.')[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _UpperCAmelCase = self.validation_file.split('.')[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __lowerCAmelCase : UpperCamelCase = field( default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=A , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def A ( ) -> Optional[int]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) datasets.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _UpperCAmelCase = data_args.train_file.split('.' )[-1] _UpperCAmelCase = data_args.test_file.split('.' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _UpperCAmelCase = data_args.test_file else: raise ValueError('Need either a GLUE task or a test file for `do_predict`.' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('.csv' ): # Loading a dataset from local csv files _UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _UpperCAmelCase = raw_datasets['train'].features['label'].names _UpperCAmelCase = len(_UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , ) _UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _UpperCAmelCase = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. _UpperCAmelCase = {'Refused': 0, 'Entailed': 1} _UpperCAmelCase = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ): _UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )] _UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _UpperCAmelCase = examples['statement'] _UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) ) _UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ) _UpperCAmelCase = examples['label'] return result with training_args.main_process_first(desc='dataset map pre-processing' ): _UpperCAmelCase = raw_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) _UpperCAmelCase = raw_datasets['train'] if data_args.max_train_samples is not None: _UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) _UpperCAmelCase = raw_datasets['validation'] if data_args.max_eval_samples is not None: _UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('--do_predict requires a test dataset' ) _UpperCAmelCase = raw_datasets['test'] if data_args.max_predict_samples is not None: _UpperCAmelCase = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCAmelCase : EvalPrediction ): _UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _UpperCAmelCase = default_data_collator elif training_args.fpaa: _UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) else: _UpperCAmelCase = None # Initialize our Trainer _UpperCAmelCase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) _UpperCAmelCase = train_result.metrics _UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _UpperCAmelCase ) trainer.save_metrics('train' , _UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase ) _UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase ) _UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) if training_args.do_predict: logger.info('*** Predict ***' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_UpperCAmelCase = predict_dataset.remove_columns('label' ) _UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions _UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 ) _UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Predict Results *****' ) writer.write('index\tprediction\n' ) for index, item in enumerate(_UpperCAmelCase ): _UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) _UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
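# Minimal sketch of the table conversion inside `preprocess_tabfact_function`
# above: TabFact serializes tables as '#'-separated rows, and the script turns
# them into a pandas DataFrame before tokenization. The table text here is an
# assumed toy example, not real dataset content.
import pandas as pd

_table_text = "city#population\nparis#2.1m\nlondon#8.9m\n"
_table_content = [row.split("#") for row in _table_text.strip("\n").split("\n")]
table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
print(table_pd)
#      city population
# 0   paris       2.1m
# 1  london       8.9m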
339
0
"""simple docstring""" import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def lowercase (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ) -> str: '''simple docstring''' assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowercase (snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[int] ) -> Any: '''simple docstring''' lowerCAmelCase = tmp_path / """cache""" lowerCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase = SqlDatasetReader( """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read() _check_sql_dataset(_UpperCAmelCase , _UpperCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowercase (snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase = tmp_path / """cache""" lowerCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} lowerCAmelCase = features.copy() if features else default_expected_features lowerCAmelCase = ( Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read() _check_sql_dataset(_UpperCAmelCase , _UpperCAmelCase ) def lowercase (snake_case__ : Any ) -> int: '''simple docstring''' with contextlib.closing(sqlitea.connect(_UpperCAmelCase ) ) as con: lowerCAmelCase = con.cursor() cur.execute("""SELECT * FROM dataset""" ) for row in cur: yield row @require_sqlalchemy def lowercase (snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Dict ) -> Dict: '''simple docstring''' lowerCAmelCase = tmp_path / """cache""" lowerCAmelCase = os.path.join(_UpperCAmelCase , """tmp.sql""" ) lowerCAmelCase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_UpperCAmelCase ).read() SqlDatasetWriter(_UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write() lowerCAmelCase = iter_sql_file(_UpperCAmelCase ) lowerCAmelCase = iter_sql_file(_UpperCAmelCase ) for rowa, rowa in zip(_UpperCAmelCase , _UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def lowercase (snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' 
lowerCAmelCase = tmp_path / """cache""" lowerCAmelCase = os.path.join(_UpperCAmelCase , """tmp.sql""" ) lowerCAmelCase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_UpperCAmelCase ).read() SqlDatasetWriter(_UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write() lowerCAmelCase = iter_sql_file(_UpperCAmelCase ) lowerCAmelCase = iter_sql_file(_UpperCAmelCase ) for rowa, rowa in zip(_UpperCAmelCase , _UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def lowercase (snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = tmp_path / """cache""" lowerCAmelCase = os.path.join(_UpperCAmelCase , """tmp.sql""" ) lowerCAmelCase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_UpperCAmelCase ).read() with pytest.raises(_UpperCAmelCase ): SqlDatasetWriter(_UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
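# Hedged round-trip sketch mirroring the fixtures above: write a small in-memory
# Dataset to SQLite with SqlDatasetWriter, then read it back with SqlDatasetReader.
# Requires sqlalchemy; the temporary-path handling is an assumption for illustration.
import os
import tempfile

from datasets import Dataset
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

with tempfile.TemporaryDirectory() as tmp:
    db_path = os.path.join(tmp, "tmp.sql")
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    SqlDatasetWriter(ds, "dataset", "sqlite:///" + db_path, num_proc=1).write()
    loaded = SqlDatasetReader("dataset", "sqlite:///" + db_path).read()
    assert loaded.column_names == ["col_1", "col_2", "col_3"]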
155
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' _UpperCAmelCase = multiprocessing.Manager() _UpperCAmelCase = manager.list() _UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _UpperCAmelCase = shutil.rmtree _UpperCAmelCase = os.rmdir _UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _UpperCAmelCase = {} with swallow_io(): with time_limit(_UpperCAmelCase ): exec(_UpperCAmelCase , _UpperCAmelCase ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. _UpperCAmelCase = rmtree _UpperCAmelCase = rmdir _UpperCAmelCase = chdir @contextlib.contextmanager def A ( _UpperCAmelCase : Union[str, Any] ) -> Any: '''simple docstring''' def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase ) signal.signal(signal.SIGALRM , _UpperCAmelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def A ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCAmelCase ): with contextlib.redirect_stderr(_UpperCAmelCase ): with redirect_stdin(_UpperCAmelCase ): yield @contextlib.contextmanager def A ( ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCAmelCase ): yield dirname class __lowerCAmelCase ( A ): pass class __lowerCAmelCase ( io.StringIO ): def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any: """simple docstring""" raise OSError def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]: """simple docstring""" raise OSError def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" raise OSError def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]: """simple docstring""" return False class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore UpperCamelCase = '''stdin''' @contextlib.contextmanager def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' if root == ".": yield return _UpperCAmelCase = os.getcwd() os.chdir(_UpperCAmelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str]=None ) -> Any: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _UpperCAmelCase = None _UpperCAmelCase = None import os _UpperCAmelCase = '1' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import shutil _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None import subprocess _UpperCAmelCase = None # type: ignore _UpperCAmelCase = None import sys _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None
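# Hedged usage sketch of the sandbox pieces above. In this dump the context
# managers were all renamed `A`, but the call sites inside the runner show their
# upstream names (`swallow_io`, `time_limit`) and the exception (`TimeoutException`),
# which this sketch uses:
program = "print(sum(range(10)))"  # made-up untrusted snippet
result = []
try:
    with swallow_io():  # silence stdout/stderr and block stdin
        with time_limit(3.0):  # raise TimeoutException after 3 seconds
            exec(program, {})
    result.append("passed")
except TimeoutException:
    result.append("timed out")
except BaseException as exc:
    result.append(f"failed: {exc}")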
339
0
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 1_0000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
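# Quick usage check for the helpers above (illustrative, executes at import):
# 56 resolves in one reverse-and-add step (56 + 65 = 121), while 196 is the
# classic Lychrel candidate whose sequence has never been observed to reach a
# palindrome, so 50 iterations certainly do not find one.
assert sum_reverse(56) == 121 and is_palindrome(121)
n = 196
for _ in range(50):
    n = sum_reverse(n)
assert not is_palindrome(n)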
87
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
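# Hedged sketch of the env-flag helper defined at the top of this file (its `def`
# was renamed `A` in this dump; the module-level call right after it uses the
# upstream name `parse_flag_from_env`). Setting RUN_SLOW=yes flips the gate that
# the `slow` skip decorator consults.
import os

os.environ["RUN_SLOW"] = "yes"  # hypothetical value, normally set by CI
run_slow = parse_flag_from_env("RUN_SLOW", default=False)
assert run_slow  # distutils.util.strtobool("yes") returns 1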
339
0
'''simple docstring'''


def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
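# Worked example (illustrative) on the graph from the commented-out demo above.
# The matching heuristic returns a vertex cover at most twice the minimum size.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
# every edge must have at least one endpoint in the cover
assert all(u in cover or v in cover for u in graph for v in graph[u])
print(f"Matching vertex cover: {cover}")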
298
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
339
0
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize lowerCAmelCase = '''\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n''' lowerCAmelCase = '''\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n''' lowerCAmelCase = '''\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def _A (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _A (self , lowerCAmelCase ): import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=0.9 , lowerCAmelCase=3 , lowerCAmelCase=0.5 ): if NLTK_VERSION >= version.Version('3.6.5' ): __lowercase= [ meteor_score.single_meteor_score( word_tokenize(lowerCAmelCase ) , word_tokenize(lowerCAmelCase ) , alpha=lowerCAmelCase , beta=lowerCAmelCase , gamma=lowerCAmelCase ) for ref, pred in zip(lowerCAmelCase , lowerCAmelCase ) ] else: __lowercase= [ meteor_score.single_meteor_score(lowerCAmelCase , lowerCAmelCase , alpha=lowerCAmelCase , beta=lowerCAmelCase , gamma=lowerCAmelCase ) for ref, pred in zip(lowerCAmelCase , lowerCAmelCase ) ] return {"meteor": np.mean(lowerCAmelCase )}
295
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
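# Hedged direct-NLTK sketch of what the metric above wraps: tokenize both strings
# and call nltk's single_meteor_score with the same default alpha/beta/gamma. The
# sentences come from the metric's own docstring example.
import nltk
from nltk import word_tokenize
from nltk.translate.meteor_score import single_meteor_score

nltk.download("wordnet")
nltk.download("punkt")
nltk.download("omw-1.4")

ref = "It is a guide to action that ensures that the military will forever heed Party commands"
pred = "It is a guide to action which ensures that the military always obeys the commands of the party"
score = single_meteor_score(word_tokenize(ref), word_tokenize(pred), alpha=0.9, beta=3, gamma=0.5)
print(round(score, 4))  # ~0.6944 on recent NLTK, per the docstring above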
339
0
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """simple docstring"""

    @slow
    def UpperCamelCase__(self: int):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 7_68))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
63
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , 
_UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
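# Toy illustration of the key renaming performed above: each mapping substring is
# replaced in order, turning an OpenAI checkpoint key into its Transformers
# equivalent. The key and the two-entry mapping excerpt are assumptions for
# demonstration only.
WHISPER_MAPPING_EXCERPT = {"blocks": "layers", ".attn.query": ".self_attn.q_proj"}

key = "decoder.blocks.0.attn.query.weight"  # made-up example key
new_key = key
for k, v in WHISPER_MAPPING_EXCERPT.items():
    if k in new_key:
        new_key = new_key.replace(k, v)
print(f"{key} -> {new_key}")  # decoder.layers.0.self_attn.q_proj.weight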
339
0
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
100
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5",
    ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam",
    ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
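# Hedged usage sketch: the builder above backs `load_dataset("audiofolder", ...)`,
# which scans a directory for files with the listed extensions and infers labels
# from folder names. The directory layout below is hypothetical.
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="/path/to/audio")  # /path/to/audio/<label>/clip.wav
print(ds["train"].features)  # an Audio() column named "audio" plus a "label" column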
339
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
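A short sketch of what the lazy `_import_structure` above buys at runtime, assuming an installed `transformers` build that ships this model; the `model_type` value comes from the config class.

import transformers.models.roberta_prelayernorm as roberta_prelayernorm

# The module object is a _LazyModule proxy; the configuration and modeling
# files are only imported when one of their names is first accessed.
config = roberta_prelayernorm.RobertaPreLayerNormConfig()
print(config.model_type)  # "roberta-prelayernorm"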
code_codestyle: 219
import sys
from collections import defaultdict


class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble `val` up from `index` toward the root.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
style_context_codestyle: 339
label: 0
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    '''BuilderConfig for AudioFolder.'''

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')


AUDIO_EXTENSIONS = [
    '.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5',
    '.mpc2k', '.ogg', '.paf', '.pvf', '.raw', '.rf64', '.sd2', '.sds', '.ircam',
    '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
code_codestyle: 200
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # "\u2581" is the sentencepiece underline marking a leading space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
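The same demo can be reproduced with the high-level fill-mask pipeline; a sketch assuming the "camembert-base" checkpoint is downloadable (`top_k` is the current parameter name; very old transformers releases used `topk`).

from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
for pred in camembert_fill_mask("Le camembert est <mask> :)", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))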
style_context_codestyle: 339
label: 0
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
code_codestyle: 71
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
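A quick sanity check of `is_prime` outside the unittest suite: enumerate the primes below 50.

print([n for n in range(2, 50) if is_prime(n)])
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]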
style_context_codestyle: 339
label: 0
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
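A sketch of the processor in use, assuming the public "openai/clip-vit-base-patch32" checkpoint; the image path is a stand-in.

from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("cat.jpg")
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"],
    images=image,
    return_tensors="pt",
    padding=True,
)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']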
code_codestyle: 340
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
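An end-to-end sketch of the pipeline above, assuming a CLIP checkpoint and a reachable image URL (both stand-ins).

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
print(preds)  # list of {"score": ..., "label": ...}, sorted by descending score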
style_context_codestyle: 340
label: 1